Since LLVM 3.0, there has been a CostModel.cpp under the Analysis directory. Quoting its documentation:
This file defines the cost model analysis. It provides a very basic cost estimation for LLVM-IR. This analysis uses the services of the codegen to approximate the cost of any IR instruction when lowered to machine instructions. The cost results are unit-less and the cost number represents the throughput of the machine assuming that all loads hit the cache, all branches are predicted, etc. The cost numbers can be added in order to compare two or more transformation alternatives.
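For reference, in LLVM releases that still ship the legacy pass manager, the stock analysis can be printed directly with opt; the exact invocation below is an assumption and the flags vary by version:
opt -cost-model -analyze input.ll
It emits lines of the form "Cost Model: Found an estimated cost of ... for instruction: ...".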
Best Answer
Here is an example that worked for me.
The main driver file used for testing:
#include <iostream>
#include <string>
#include <llvm/Support/MemoryBuffer.h>
#include <llvm/Support/ErrorOr.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/Bitcode/BitcodeReader.h>
#include <llvm/Support/raw_ostream.h>
#include <llvm/Analysis/Passes.h>
#include <llvm/Analysis/TargetTransformInfo.h>
#include <llvm/Analysis/CostModelDummy.h>
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/SourceMgr.h"
using namespace llvm;
int main(int argc, char *argv[]) {
StringRef filename = "FILE_NAME";
LLVMContext context;
ErrorOr<std::unique_ptr<MemoryBuffer>> fileOrErr =
MemoryBuffer::getFileOrSTDIN(filename);
if (std::error_code ec = fileOrErr.getError()) {
std::cerr << " Error opening input file: " + ec.message() << std::endl;
return 2;
}
// parseIRFile handles both textual IR (.ll) and bitcode (.bc) input, so a
// separate parseBitcodeFile() call is not needed here.
SMDiagnostic Err;
std::unique_ptr<Module> m(parseIRFile(filename, Err, context));
if (!m) {
Err.print(argv[0], errs());
return 3;
}
std::cout << "Successfully read Module:" << std::endl;
std::cout << " Name: " << m->getName().str() << std::endl;
std::cout << " Target triple: " << m->getTargetTriple() << std::endl;
for (auto iter1 = m->getFunctionList().begin(); iter1 != m->getFunctionList().end(); iter1++) {
Function &f = *iter1;
CostModelAnalysisDummy obj;
std::cout << " STEP: 1 Function: " << f.getName().str() << std::endl;
obj.runOnFunction(f);
std::cout << " STEP: 2 Function: " << f.getName().str() << std::endl;
for (auto iter2 = f.getBasicBlockList().begin(); iter2 != f.getBasicBlockList().end(); iter2++) {
BasicBlock &bb = *iter2;
std::cout << " BasicBlock: " << bb.getName().str() << std::endl;
for (auto iter3 = bb.begin(); iter3 != bb.end(); iter3++) {
Instruction &inst = *iter3;
std::cout << std::endl << " Number of Cycles: " << obj.getInstructionCost(&inst) << std::endl;
std::cout << " Instruction " << &inst << " : " << inst.getOpcodeName();
unsigned int i = 0;
unsigned int opnt_cnt = inst.getNumOperands();
for (; i < opnt_cnt; ++i)
{
Value *opnd = inst.getOperand(i);
std::string o;
// raw_string_ostream os(o);
// opnd->print(os);
//opnd->printAsOperand(os, true, m);
if (opnd->hasName()) {
o = opnd->getName();
std::cout << " " << o << ",";
}
else {
std::cout << " ptr" << opnd << ",";
}
}
std::cout << std::endl;
}
}
}
return 0;
}
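A sketch of how this driver might be built, assuming LLVM was rebuilt with the CostModelDummy pass added under lib/Analysis and llvm-config is on the PATH (the component list and flags are assumptions and depend on the LLVM version and build):
clang++ main.cpp $(llvm-config --cxxflags --ldflags --system-libs --libs core irreader bitreader analysis support) -o cost-driver
Replace the FILE_NAME placeholder in main() with the path to a .ll or .bc file before running the resulting cost-driver binary.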
The modified cost-model pass source (compiled as CostModelDummy.cpp, for example under lib/Analysis):
//===- CostModel.cpp ------ Cost Model Analysis ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the cost model analysis. It provides a very basic cost
// estimation for LLVM-IR. This analysis uses the services of the codegen
// to approximate the cost of any IR instruction when lowered to machine
// instructions. The cost results are unit-less and the cost number represents
// the throughput of the machine assuming that all loads hit the cache, all
// branches are predicted, etc. The cost numbers can be added in order to
// compare two or more transformation alternatives.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/CostModelDummy.h"
using namespace llvm;
// Register this pass.
char CostModelAnalysisDummy::ID = 0;
static const char cm_name[] = "Cost Model Analysis";
INITIALIZE_PASS_BEGIN(CostModelAnalysisDummy, CM_NAME, cm_name, false, true)
INITIALIZE_PASS_END(CostModelAnalysisDummy, CM_NAME, cm_name, false, true)
static cl::opt<bool> EnableReduxCost("costmodel-reduxcost-Dummy", cl::init(false),
cl::Hidden,
cl::desc("Recognize reduction patterns."));
FunctionPass *createCostModelAnalysisDummyPass() {
return new CostModelAnalysisDummy();
}
void
CostModelAnalysisDummy::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
}
bool
CostModelAnalysisDummy::runOnFunction(Function &F) {
this->F = &F;
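// Manually wire up the legacy pass-manager plumbing: install an AnalysisResolver
// and record a TargetTransformInfoWrapperPass as an available analysis so that
// getAnalysisIfAvailable<> below succeeds even though this pass is driven
// directly from main() rather than by a real PassManager.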
PMDataManager *DM = getAsPMDataManager();
AnalysisResolver *AR = new AnalysisResolver(*DM);
setResolver(AR);
setTopLevelManager(new CostModelAnalysisDummy());
recordAvailableAnalysis(new TargetTransformInfoWrapperPass());
auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
TTI = TTIWP ? &TTIWP->getTTI(F) : nullptr;
return false;
}
static bool isReverseVectorMask(ArrayRef<int> Mask) {
for (unsigned i = 0, MaskSize = Mask.size(); i < MaskSize; ++i)
if (Mask[i] >= 0 && Mask[i] != (int)(MaskSize - 1 - i))
return false;
return true;
}
static bool isSingleSourceVectorMask(ArrayRef<int> Mask) {
bool Vec0 = false;
bool Vec1 = false;
for (unsigned i = 0, NumVecElts = Mask.size(); i < NumVecElts; ++i) {
if (Mask[i] >= 0) {
if ((unsigned)Mask[i] >= NumVecElts)
Vec1 = true;
else
Vec0 = true;
}
}
return !(Vec0 && Vec1);
}
static bool isZeroEltBroadcastVectorMask(ArrayRef<int> Mask) {
for (unsigned i = 0; i < Mask.size(); ++i)
if (Mask[i] > 0)
return false;
return true;
}
static bool isAlternateVectorMask(ArrayRef<int> Mask) {
bool isAlternate = true;
unsigned MaskSize = Mask.size();
// Example: shufflevector A, B, <0,5,2,7>
for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
if (Mask[i] < 0)
continue;
isAlternate = Mask[i] == (int)((i & 1) ? MaskSize + i : i);
}
if (isAlternate)
return true;
isAlternate = true;
// Example: shufflevector A, B, <4,1,6,3>
for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
if (Mask[i] < 0)
continue;
isAlternate = Mask[i] == (int)((i & 1) ? i : MaskSize + i);
}
return isAlternate;
}
static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) {
TargetTransformInfo::OperandValueKind OpInfo =
TargetTransformInfo::OK_AnyValue;
// Check for a splat of a constant or for a non uniform vector of constants.
if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
OpInfo = TargetTransformInfo::OK_NonUniformConstantValue;
if (cast<Constant>(V)->getSplatValue() != nullptr)
OpInfo = TargetTransformInfo::OK_UniformConstantValue;
}
// Check for a splat of a uniform value. This is not loop aware, so return
// true only for the obviously uniform cases (argument, globalvalue)
const Value *Splat = getSplatValue(V);
if (Splat && (isa<Argument>(Splat) || isa<GlobalValue>(Splat)))
OpInfo = TargetTransformInfo::OK_UniformValue;
return OpInfo;
}
static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
unsigned Level) {
// We don't need a shuffle if we just want to have element 0 in position 0 of
// the vector.
if (!SI && Level == 0 && IsLeft)
return true;
else if (!SI)
return false;
SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);
// Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
// we look at the left or right side.
for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
Mask[i] = val;
SmallVector<int, 16> ActualMask = SI->getShuffleMask();
return Mask == ActualMask;
}
static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
unsigned Level, unsigned NumLevels) {
// Match one level of pairwise operations.
// %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
// %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
if (BinOp == nullptr)
return false;
assert(BinOp->getType()->isVectorTy() && "Expecting a vector type");
unsigned Opcode = BinOp->getOpcode();
Value *L = BinOp->getOperand(0);
Value *R = BinOp->getOperand(1);
ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(L);
if (!LS && Level)
return false;
ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(R);
if (!RS && Level)
return false;
// On level 0 we can omit one shufflevector instruction.
if (!Level && !RS && !LS)
return false;
// Shuffle inputs must match.
Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
Value *NextLevelOp = nullptr;
if (NextLevelOpR && NextLevelOpL) {
// If we have two shuffles their operands must match.
if (NextLevelOpL != NextLevelOpR)
return false;
NextLevelOp = NextLevelOpL;
} else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
// On the first level we can omit the shufflevector <0, undef,...>. So the
// input to the other shufflevector <1, undef> must match with one of the
// inputs to the current binary operation.
// Example:
// %NextLevelOpL = shufflevector %R, <1, undef ...>
// %BinOp = fadd %NextLevelOpL, %R
if (NextLevelOpL && NextLevelOpL != R)
return false;
else if (NextLevelOpR && NextLevelOpR != L)
return false;
NextLevelOp = NextLevelOpL ? R : L;
} else
return false;
// Check that the next levels binary operation exists and matches with the
// current one.
BinaryOperator *NextLevelBinOp = nullptr;
if (Level + 1 != NumLevels) {
if (!(NextLevelBinOp = dyn_cast<BinaryOperator>(NextLevelOp)))
return false;
else if (NextLevelBinOp->getOpcode() != Opcode)
return false;
}
// Shuffle mask for pairwise operation must match.
if (matchPairwiseShuffleMask(LS, true, Level)) {
if (!matchPairwiseShuffleMask(RS, false, Level))
return false;
} else if (matchPairwiseShuffleMask(RS, true, Level)) {
if (!matchPairwiseShuffleMask(LS, false, Level))
return false;
} else
return false;
if (++Level == NumLevels)
return true;
// Match next level.
return matchPairwiseReductionAtLevel(NextLevelBinOp, Level, NumLevels);
}
static bool matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
unsigned &Opcode, Type *&Ty) {
if (!EnableReduxCost)
return false;
// Need to extract the first element.
ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
unsigned Idx = ~0u;
if (CI)
Idx = CI->getZExtValue();
if (Idx != 0)
return false;
BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
if (!RdxStart)
return false;
Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
if (!isPowerOf2_32(NumVecElems))
return false;
// We look for a sequence of shuffle,shuffle,add triples like the following
// that builds a pairwise reduction tree.
//
// (X0, X1, X2, X3)
// (X0 + X1, X2 + X3, undef, undef)
// ((X0 + X1) + (X2 + X3), undef, undef, undef)
//
// %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
// %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
// %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
// <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
// %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
// <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
// %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
// %r = extractelement <4 x float> %bin.rdx8, i32 0
if (!matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)))
return false;
Opcode = RdxStart->getOpcode();
Ty = VecTy;
return true;
}
static std::pair<Value *, ShuffleVectorInst *>
getShuffleAndOtherOprd(BinaryOperator *B) {
Value *L = B->getOperand(0);
Value *R = B->getOperand(1);
ShuffleVectorInst *S = nullptr;
if ((S = dyn_cast<ShuffleVectorInst>(L)))
return std::make_pair(R, S);
S = dyn_cast<ShuffleVectorInst>(R);
return std::make_pair(L, S);
}
static bool matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
unsigned &Opcode, Type *&Ty) {
if (!EnableReduxCost)
return false;
// Need to extract the first element.
ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
unsigned Idx = ~0u;
if (CI)
Idx = CI->getZExtValue();
if (Idx != 0)
return false;
BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
if (!RdxStart)
return false;
unsigned RdxOpcode = RdxStart->getOpcode();
Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
if (!isPowerOf2_32(NumVecElems))
return false;
// We look for a sequence of shuffles and adds like the following matching one
// fadd, shuffle vector pair at a time.
//
// %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
// %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
// %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
// %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
// %r = extractelement <4 x float> %bin.rdx8, i32 0
unsigned MaskStart = 1;
Value *RdxOp = RdxStart;
SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
unsigned NumVecElemsRemain = NumVecElems;
while (NumVecElemsRemain - 1) {
// Check for the right reduction operation.
BinaryOperator *BinOp;
if (!(BinOp = dyn_cast<BinaryOperator>(RdxOp)))
return false;
if (BinOp->getOpcode() != RdxOpcode)
return false;
Value *NextRdxOp;
ShuffleVectorInst *Shuffle;
std::tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);
// Check the current reduction operation and the shuffle use the same value.
if (Shuffle == nullptr)
return false;
if (Shuffle->getOperand(0) != NextRdxOp)
return false;
// Check that the shuffle mask matches.
for (unsigned j = 0; j != MaskStart; ++j)
ShuffleMask[j] = MaskStart + j;
// Fill the rest of the mask with -1 for undef.
std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
if (ShuffleMask != Mask)
return false;
RdxOp = NextRdxOp;
NumVecElemsRemain /= 2;
MaskStart *= 2;
}
Opcode = RdxOpcode;
Ty = VecTy;
return true;
}
unsigned CostModelAnalysisDummy::getInstructionCost(const Instruction *I) const {
if (!TTI)
return -1;
switch (I->getOpcode()) {
case Instruction::GetElementPtr:
return TTI->getUserCost(I);
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Br: {
return TTI->getCFInstrCost(I->getOpcode());
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
TargetTransformInfo::OperandValueKind Op1VK =
getOperandInfo(I->getOperand(0));
TargetTransformInfo::OperandValueKind Op2VK =
getOperandInfo(I->getOperand(1));
SmallVector<const Value*, 2> Operands(I->operand_values());
return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK,
Op2VK, TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None,
Operands);
}
case Instruction::Select: {
const SelectInst *SI = cast<SelectInst>(I);
Type *CondTy = SI->getCondition()->getType();
return TTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
}
case Instruction::ICmp:
case Instruction::FCmp: {
Type *ValTy = I->getOperand(0)->getType();
return TTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
}
case Instruction::Store: {
const StoreInst *SI = cast<StoreInst>(I);
Type *ValTy = SI->getValueOperand()->getType();
return TTI->getMemoryOpCost(I->getOpcode(), ValTy,
SI->getAlignment(),
SI->getPointerAddressSpace());
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(I);
return TTI->getMemoryOpCost(I->getOpcode(), I->getType(),
LI->getAlignment(),
LI->getPointerAddressSpace());
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast:
case Instruction::AddrSpaceCast: {
Type *SrcTy = I->getOperand(0)->getType();
return TTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
}
case Instruction::ExtractElement: {
const ExtractElementInst * EEI = cast<ExtractElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
// Try to match a reduction sequence (series of shufflevector and vector
// adds followed by a extractelement).
unsigned ReduxOpCode;
Type *ReduxType;
if (matchVectorSplittingReduction(EEI, ReduxOpCode, ReduxType))
return TTI->getArithmeticReductionCost(ReduxOpCode, ReduxType, false);
else if (matchPairwiseReduction(EEI, ReduxOpCode, ReduxType))
return TTI->getArithmeticReductionCost(ReduxOpCode, ReduxType, true);
return TTI->getVectorInstrCost(I->getOpcode(),
EEI->getOperand(0)->getType(), Idx);
}
case Instruction::InsertElement: {
const InsertElementInst * IE = cast<InsertElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
return TTI->getVectorInstrCost(I->getOpcode(),
IE->getType(), Idx);
}
case Instruction::ShuffleVector: {
const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
Type *VecTypOp0 = Shuffle->getOperand(0)->getType();
unsigned NumVecElems = VecTypOp0->getVectorNumElements();
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
if (NumVecElems == Mask.size()) {
if (isReverseVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0,
0, nullptr);
if (isAlternateVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Select,
VecTypOp0, 0, nullptr);
if (isZeroEltBroadcastVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast,
VecTypOp0, 0, nullptr);
if (isSingleSourceVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_PermuteSingleSrc,
VecTypOp0, 0, nullptr);
return TTI->getShuffleCost(TargetTransformInfo::SK_PermuteTwoSrc,
VecTypOp0, 0, nullptr);
}
return -1;
}
case Instruction::Call:
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
SmallVector<Value *, 4> Args;
for (unsigned J = 0, JE = II->getNumArgOperands(); J != JE; ++J)
Args.push_back(II->getArgOperand(J));
FastMathFlags FMF;
if (auto *FPMO = dyn_cast<FPMathOperator>(II))
FMF = FPMO->getFastMathFlags();
return TTI->getIntrinsicInstrCost(II->getIntrinsicID(), II->getType(),
Args, FMF);
}
return -1;
default:
// We don't have any information on this instruction.
return -1;
}
}
void CostModelAnalysisDummy::print(raw_ostream &OS, const Module*) const {
if (!F)
return;
for (BasicBlock &B : *F) {
for (Instruction &Inst : B) {
unsigned Cost = getInstructionCost(&Inst);
if (Cost != (unsigned)-1)
OS << "Cost Model: Found an estimated cost of " << Cost;
else
OS << "Cost Model: Unknown cost";
OS << " for instruction: " << Inst << "\n";
}
}
}
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <iostream>
#include "llvm/IR/LegacyPassManagers.h"
using namespace llvm;
#define CM_NAME "cost-model-sanji"
#define DEBUG_TYPE CM_NAME
namespace llvm {
// Declaration needed by the INITIALIZE_PASS machinery in CostModelDummy.cpp and
// by the constructor below; in an in-tree build this would normally live in
// llvm/InitializePasses.h.
void initializeCostModelAnalysisDummyPass(PassRegistry &);
} // namespace llvm
class CostModelAnalysisDummy : public PMDataManager, public FunctionPass, public PMTopLevelManager {
public:
static char ID; // Class identification, replacement for typeinfo
CostModelAnalysisDummy() : FunctionPass(ID), PMDataManager(), PMTopLevelManager(new FPPassManager()), F(nullptr), TTI(nullptr) {
llvm::initializeCostModelAnalysisDummyPass(
*PassRegistry::getPassRegistry());
}
/// Returns the expected cost of the instruction.
/// Returns -1 if the cost is unknown.
/// Note, this method does not cache the cost calculation and it
/// can be expensive in some cases.
unsigned getInstructionCost(const Instruction *I) const;
bool runOnFunction(Function &F) override;
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
PassManagerType getTopLevelPassManagerType() override {
return PMT_BasicBlockPassManager;
}
FPPassManager *getContainedManager(unsigned N) {
assert(N < PassManagers.size() && "Pass number out of range!");
FPPassManager *FP = static_cast<FPPassManager *>(PassManagers[N]);
return FP;
}
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(raw_ostream &OS, const Module*) const override;
/// The function that we analyze.
Function *F;
/// Target information.
const TargetTransformInfo *TTI;
};
FunctionPass *createCostModelAnalysisDummyPass();
#endif // LLVM_ANALYSIS_COSTMODELDUMMY_H
Regarding "llvm - Using CostModel to get CPU cycles of LLVM IR", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/24793849/