==== FILE: repos/DirectXShaderCompiler/lib/DebugInfo/PDB/DIA/DIAEnumSymbols.cpp ====
//==- DIAEnumSymbols.cpp - DIA Symbol Enumerator impl ------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
#include "llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h"
#include "llvm/DebugInfo/PDB/DIA/DIARawSymbol.h"
#include "llvm/DebugInfo/PDB/DIA/DIASession.h"
using namespace llvm;
DIAEnumSymbols::DIAEnumSymbols(const DIASession &PDBSession,
CComPtr<IDiaEnumSymbols> DiaEnumerator)
: Session(PDBSession), Enumerator(DiaEnumerator) {}
uint32_t DIAEnumSymbols::getChildCount() const {
LONG Count = 0;
return (S_OK == Enumerator->get_Count(&Count)) ? Count : 0;
}
std::unique_ptr<PDBSymbol>
DIAEnumSymbols::getChildAtIndex(uint32_t Index) const {
CComPtr<IDiaSymbol> Item;
if (S_OK != Enumerator->Item(Index, &Item))
return nullptr;
std::unique_ptr<DIARawSymbol> RawSymbol(new DIARawSymbol(Session, Item));
return std::unique_ptr<PDBSymbol>(PDBSymbol::create(Session, std::move(RawSymbol)));
}
std::unique_ptr<PDBSymbol> DIAEnumSymbols::getNext() {
CComPtr<IDiaSymbol> Item;
ULONG NumFetched = 0;
if (S_OK != Enumerator->Next(1, &Item, &NumFetched))
return nullptr;
std::unique_ptr<DIARawSymbol> RawSymbol(new DIARawSymbol(Session, Item));
return std::unique_ptr<PDBSymbol>(
PDBSymbol::create(Session, std::move(RawSymbol)));
}
void DIAEnumSymbols::reset() { Enumerator->Reset(); }
DIAEnumSymbols *DIAEnumSymbols::clone() const {
CComPtr<IDiaEnumSymbols> EnumeratorClone;
if (S_OK != Enumerator->Clone(&EnumeratorClone))
return nullptr;
return new DIAEnumSymbols(Session, EnumeratorClone);
}
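// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of draining the enumerator, assuming a live DIASession and
// an IDiaEnumSymbols COM pointer obtained from the DIA SDK at runtime.
static void walkChildren(const DIASession &Session,
                         CComPtr<IDiaEnumSymbols> DiaEnumerator) {
  DIAEnumSymbols Enum(Session, DiaEnumerator);
  while (std::unique_ptr<PDBSymbol> Child = Enum.getNext()) {
    // Each child wraps a DIARawSymbol; it is released automatically when the
    // unique_ptr goes out of scope at the end of this iteration.
  }
  Enum.reset(); // rewind so the same enumerator can be walked again
}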
==== FILE: repos/DirectXShaderCompiler/lib/DxilHash/CMakeLists.txt ====
add_llvm_library(LLVMDxilHash
DxilHash.cpp)
==== FILE: repos/DirectXShaderCompiler/lib/DxilHash/DxilHash.cpp ====
///////////////////////////////////////////////////////////////////////////////
// //
// DxilHash.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// DXBC/DXIL container hashing functions //
// //
///////////////////////////////////////////////////////////////////////////////
#include "assert.h"
#ifdef _WIN32
#include <windows.h>
#else
#include "dxc/WinAdapter.h"
typedef unsigned char UINT8;
#endif
// RSA Data Security, Inc. MD5 Message-Digest Algorithm
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
const BYTE padding[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
void FF(UINT &a, UINT b, UINT c, UINT d, UINT x, UINT8 s, UINT ac) {
a += ((b & c) | (~b & d)) + x + ac;
a = ((a << s) | (a >> (32 - s))) + b;
}
void GG(UINT &a, UINT b, UINT c, UINT d, UINT x, UINT8 s, UINT ac) {
a += ((b & d) | (c & ~d)) + x + ac;
a = ((a << s) | (a >> (32 - s))) + b;
}
void HH(UINT &a, UINT b, UINT c, UINT d, UINT x, UINT8 s, UINT ac) {
a += (b ^ c ^ d) + x + ac;
a = ((a << s) | (a >> (32 - s))) + b;
}
void II(UINT &a, UINT b, UINT c, UINT d, UINT x, UINT8 s, UINT ac) {
a += (c ^ (b | ~d)) + x + ac;
a = ((a << s) | (a >> (32 - s))) + b;
}
// **************************************************************************************
// **** DO NOT USE THESE ROUTINES TO PROVIDE FUNCTIONALITY THAT NEEDS TO BE
// SECURE!!! ***
// **************************************************************************************
void ComputeM_D_5Hash(const BYTE *pData, UINT byteCount, BYTE *pOutHash) {
UINT leftOver = byteCount & 0x3f;
UINT padAmount;
bool bTwoRowsPadding = false;
if (leftOver < 56) {
padAmount = 56 - leftOver;
} else {
padAmount = 120 - leftOver;
bTwoRowsPadding = true;
}
UINT padAmountPlusSize = padAmount + 8;
UINT state[4] = {0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476};
UINT N = (byteCount + padAmountPlusSize) >> 6;
UINT offset = 0;
UINT NextEndState = bTwoRowsPadding ? N - 2 : N - 1;
const BYTE *pCurrData = pData;
for (UINT i = 0; i < N; i++, offset += 64, pCurrData += 64) {
assert(byteCount - offset <= byteCount); // prefast doesn't understand this
// - no underflow will happen
assert(byteCount <
64 * i + 65); // prefast doesn't understand this - no overflows will
// happen in any memcpy below
assert(byteCount < leftOver + 64 * i + 9);
assert(byteCount < leftOver + 64 * i + 1);
UINT x[16];
const UINT *pX;
if (i == NextEndState) {
if (!bTwoRowsPadding && i == N - 1) {
UINT remainder = byteCount - offset;
memcpy(x, pCurrData, remainder); // could copy nothing
memcpy((BYTE *)x + remainder, padding, padAmount);
x[14] = byteCount << 3; // sizepad lo
x[15] = 0; // sizepad hi
} else if (bTwoRowsPadding) {
if (i == N - 2) {
UINT remainder = byteCount - offset;
memcpy(x, pCurrData, remainder);
memcpy((BYTE *)x + remainder, padding, padAmount - 56);
NextEndState = N - 1;
} else if (i == N - 1) {
memcpy(x, padding + padAmount - 56, 56);
x[14] = byteCount << 3; // sizepad lo
x[15] = 0; // sizepad hi
}
}
pX = x;
} else {
pX = (const UINT *)pCurrData;
}
UINT a = state[0];
UINT b = state[1];
UINT c = state[2];
UINT d = state[3];
/* Round 1 */
FF(a, b, c, d, pX[0], S11, 0xd76aa478); /* 1 */
FF(d, a, b, c, pX[1], S12, 0xe8c7b756); /* 2 */
FF(c, d, a, b, pX[2], S13, 0x242070db); /* 3 */
FF(b, c, d, a, pX[3], S14, 0xc1bdceee); /* 4 */
FF(a, b, c, d, pX[4], S11, 0xf57c0faf); /* 5 */
FF(d, a, b, c, pX[5], S12, 0x4787c62a); /* 6 */
FF(c, d, a, b, pX[6], S13, 0xa8304613); /* 7 */
FF(b, c, d, a, pX[7], S14, 0xfd469501); /* 8 */
FF(a, b, c, d, pX[8], S11, 0x698098d8); /* 9 */
FF(d, a, b, c, pX[9], S12, 0x8b44f7af); /* 10 */
FF(c, d, a, b, pX[10], S13, 0xffff5bb1); /* 11 */
FF(b, c, d, a, pX[11], S14, 0x895cd7be); /* 12 */
FF(a, b, c, d, pX[12], S11, 0x6b901122); /* 13 */
FF(d, a, b, c, pX[13], S12, 0xfd987193); /* 14 */
FF(c, d, a, b, pX[14], S13, 0xa679438e); /* 15 */
FF(b, c, d, a, pX[15], S14, 0x49b40821); /* 16 */
/* Round 2 */
GG(a, b, c, d, pX[1], S21, 0xf61e2562); /* 17 */
GG(d, a, b, c, pX[6], S22, 0xc040b340); /* 18 */
GG(c, d, a, b, pX[11], S23, 0x265e5a51); /* 19 */
GG(b, c, d, a, pX[0], S24, 0xe9b6c7aa); /* 20 */
GG(a, b, c, d, pX[5], S21, 0xd62f105d); /* 21 */
GG(d, a, b, c, pX[10], S22, 0x2441453); /* 22 */
GG(c, d, a, b, pX[15], S23, 0xd8a1e681); /* 23 */
GG(b, c, d, a, pX[4], S24, 0xe7d3fbc8); /* 24 */
GG(a, b, c, d, pX[9], S21, 0x21e1cde6); /* 25 */
GG(d, a, b, c, pX[14], S22, 0xc33707d6); /* 26 */
GG(c, d, a, b, pX[3], S23, 0xf4d50d87); /* 27 */
GG(b, c, d, a, pX[8], S24, 0x455a14ed); /* 28 */
GG(a, b, c, d, pX[13], S21, 0xa9e3e905); /* 29 */
GG(d, a, b, c, pX[2], S22, 0xfcefa3f8); /* 30 */
GG(c, d, a, b, pX[7], S23, 0x676f02d9); /* 31 */
GG(b, c, d, a, pX[12], S24, 0x8d2a4c8a); /* 32 */
/* Round 3 */
HH(a, b, c, d, pX[5], S31, 0xfffa3942); /* 33 */
HH(d, a, b, c, pX[8], S32, 0x8771f681); /* 34 */
HH(c, d, a, b, pX[11], S33, 0x6d9d6122); /* 35 */
HH(b, c, d, a, pX[14], S34, 0xfde5380c); /* 36 */
HH(a, b, c, d, pX[1], S31, 0xa4beea44); /* 37 */
HH(d, a, b, c, pX[4], S32, 0x4bdecfa9); /* 38 */
HH(c, d, a, b, pX[7], S33, 0xf6bb4b60); /* 39 */
HH(b, c, d, a, pX[10], S34, 0xbebfbc70); /* 40 */
HH(a, b, c, d, pX[13], S31, 0x289b7ec6); /* 41 */
HH(d, a, b, c, pX[0], S32, 0xeaa127fa); /* 42 */
HH(c, d, a, b, pX[3], S33, 0xd4ef3085); /* 43 */
HH(b, c, d, a, pX[6], S34, 0x4881d05); /* 44 */
HH(a, b, c, d, pX[9], S31, 0xd9d4d039); /* 45 */
HH(d, a, b, c, pX[12], S32, 0xe6db99e5); /* 46 */
HH(c, d, a, b, pX[15], S33, 0x1fa27cf8); /* 47 */
HH(b, c, d, a, pX[2], S34, 0xc4ac5665); /* 48 */
/* Round 4 */
II(a, b, c, d, pX[0], S41, 0xf4292244); /* 49 */
II(d, a, b, c, pX[7], S42, 0x432aff97); /* 50 */
II(c, d, a, b, pX[14], S43, 0xab9423a7); /* 51 */
II(b, c, d, a, pX[5], S44, 0xfc93a039); /* 52 */
II(a, b, c, d, pX[12], S41, 0x655b59c3); /* 53 */
II(d, a, b, c, pX[3], S42, 0x8f0ccc92); /* 54 */
II(c, d, a, b, pX[10], S43, 0xffeff47d); /* 55 */
II(b, c, d, a, pX[1], S44, 0x85845dd1); /* 56 */
II(a, b, c, d, pX[8], S41, 0x6fa87e4f); /* 57 */
II(d, a, b, c, pX[15], S42, 0xfe2ce6e0); /* 58 */
II(c, d, a, b, pX[6], S43, 0xa3014314); /* 59 */
II(b, c, d, a, pX[13], S44, 0x4e0811a1); /* 60 */
II(a, b, c, d, pX[4], S41, 0xf7537e82); /* 61 */
II(d, a, b, c, pX[11], S42, 0xbd3af235); /* 62 */
II(c, d, a, b, pX[2], S43, 0x2ad7d2bb); /* 63 */
II(b, c, d, a, pX[9], S44, 0xeb86d391); /* 64 */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
}
memcpy(pOutHash, state, 16);
}
// **************************************************************************************
// **** DO NOT USE THESE ROUTINES TO PROVIDE FUNCTIONALITY THAT NEEDS TO BE
// SECURE!!! ***
// **************************************************************************************
void ComputeHashRetail(const BYTE *pData, UINT byteCount, BYTE *pOutHash) {
UINT leftOver = byteCount & 0x3f;
UINT padAmount;
bool bTwoRowsPadding = false;
if (leftOver < 56) {
padAmount = 56 - leftOver;
} else {
padAmount = 120 - leftOver;
bTwoRowsPadding = true;
}
UINT padAmountPlusSize = padAmount + 8;
UINT state[4] = {0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476};
UINT N = (byteCount + padAmountPlusSize) >> 6;
UINT offset = 0;
UINT NextEndState = bTwoRowsPadding ? N - 2 : N - 1;
const BYTE *pCurrData = pData;
for (UINT i = 0; i < N; i++, offset += 64, pCurrData += 64) {
UINT x[16];
const UINT *pX;
if (i == NextEndState) {
if (!bTwoRowsPadding && i == N - 1) {
UINT remainder = byteCount - offset;
x[0] = byteCount << 3;
assert(byteCount - offset <= byteCount); // check for underflow
assert(pCurrData + remainder == pData + byteCount);
memcpy((BYTE *)x + 4, pCurrData, remainder); // could copy nothing
memcpy((BYTE *)x + 4 + remainder, padding, padAmount);
x[15] = 1 | (byteCount << 1);
} else if (bTwoRowsPadding) {
if (i == N - 2) {
UINT remainder = byteCount - offset;
assert(byteCount - offset <= byteCount); // check for underflow
assert(pCurrData + remainder == pData + byteCount);
memcpy(x, pCurrData, remainder);
memcpy((BYTE *)x + remainder, padding, padAmount - 56);
NextEndState = N - 1;
} else if (i == N - 1) {
x[0] = byteCount << 3;
memcpy((BYTE *)x + 4, padding + padAmount - 56, 56);
x[15] = 1 | (byteCount << 1);
}
}
pX = x;
} else {
assert(pCurrData + 64 <= pData + byteCount);
pX = (const UINT *)pCurrData;
}
UINT a = state[0];
UINT b = state[1];
UINT c = state[2];
UINT d = state[3];
/* Round 1 */
FF(a, b, c, d, pX[0], S11, 0xd76aa478); /* 1 */
FF(d, a, b, c, pX[1], S12, 0xe8c7b756); /* 2 */
FF(c, d, a, b, pX[2], S13, 0x242070db); /* 3 */
FF(b, c, d, a, pX[3], S14, 0xc1bdceee); /* 4 */
FF(a, b, c, d, pX[4], S11, 0xf57c0faf); /* 5 */
FF(d, a, b, c, pX[5], S12, 0x4787c62a); /* 6 */
FF(c, d, a, b, pX[6], S13, 0xa8304613); /* 7 */
FF(b, c, d, a, pX[7], S14, 0xfd469501); /* 8 */
FF(a, b, c, d, pX[8], S11, 0x698098d8); /* 9 */
FF(d, a, b, c, pX[9], S12, 0x8b44f7af); /* 10 */
FF(c, d, a, b, pX[10], S13, 0xffff5bb1); /* 11 */
FF(b, c, d, a, pX[11], S14, 0x895cd7be); /* 12 */
FF(a, b, c, d, pX[12], S11, 0x6b901122); /* 13 */
FF(d, a, b, c, pX[13], S12, 0xfd987193); /* 14 */
FF(c, d, a, b, pX[14], S13, 0xa679438e); /* 15 */
FF(b, c, d, a, pX[15], S14, 0x49b40821); /* 16 */
/* Round 2 */
GG(a, b, c, d, pX[1], S21, 0xf61e2562); /* 17 */
GG(d, a, b, c, pX[6], S22, 0xc040b340); /* 18 */
GG(c, d, a, b, pX[11], S23, 0x265e5a51); /* 19 */
GG(b, c, d, a, pX[0], S24, 0xe9b6c7aa); /* 20 */
GG(a, b, c, d, pX[5], S21, 0xd62f105d); /* 21 */
GG(d, a, b, c, pX[10], S22, 0x2441453); /* 22 */
GG(c, d, a, b, pX[15], S23, 0xd8a1e681); /* 23 */
GG(b, c, d, a, pX[4], S24, 0xe7d3fbc8); /* 24 */
GG(a, b, c, d, pX[9], S21, 0x21e1cde6); /* 25 */
GG(d, a, b, c, pX[14], S22, 0xc33707d6); /* 26 */
GG(c, d, a, b, pX[3], S23, 0xf4d50d87); /* 27 */
GG(b, c, d, a, pX[8], S24, 0x455a14ed); /* 28 */
GG(a, b, c, d, pX[13], S21, 0xa9e3e905); /* 29 */
GG(d, a, b, c, pX[2], S22, 0xfcefa3f8); /* 30 */
GG(c, d, a, b, pX[7], S23, 0x676f02d9); /* 31 */
GG(b, c, d, a, pX[12], S24, 0x8d2a4c8a); /* 32 */
/* Round 3 */
HH(a, b, c, d, pX[5], S31, 0xfffa3942); /* 33 */
HH(d, a, b, c, pX[8], S32, 0x8771f681); /* 34 */
HH(c, d, a, b, pX[11], S33, 0x6d9d6122); /* 35 */
HH(b, c, d, a, pX[14], S34, 0xfde5380c); /* 36 */
HH(a, b, c, d, pX[1], S31, 0xa4beea44); /* 37 */
HH(d, a, b, c, pX[4], S32, 0x4bdecfa9); /* 38 */
HH(c, d, a, b, pX[7], S33, 0xf6bb4b60); /* 39 */
HH(b, c, d, a, pX[10], S34, 0xbebfbc70); /* 40 */
HH(a, b, c, d, pX[13], S31, 0x289b7ec6); /* 41 */
HH(d, a, b, c, pX[0], S32, 0xeaa127fa); /* 42 */
HH(c, d, a, b, pX[3], S33, 0xd4ef3085); /* 43 */
HH(b, c, d, a, pX[6], S34, 0x4881d05); /* 44 */
HH(a, b, c, d, pX[9], S31, 0xd9d4d039); /* 45 */
HH(d, a, b, c, pX[12], S32, 0xe6db99e5); /* 46 */
HH(c, d, a, b, pX[15], S33, 0x1fa27cf8); /* 47 */
HH(b, c, d, a, pX[2], S34, 0xc4ac5665); /* 48 */
/* Round 4 */
II(a, b, c, d, pX[0], S41, 0xf4292244); /* 49 */
II(d, a, b, c, pX[7], S42, 0x432aff97); /* 50 */
II(c, d, a, b, pX[14], S43, 0xab9423a7); /* 51 */
II(b, c, d, a, pX[5], S44, 0xfc93a039); /* 52 */
II(a, b, c, d, pX[12], S41, 0x655b59c3); /* 53 */
II(d, a, b, c, pX[3], S42, 0x8f0ccc92); /* 54 */
II(c, d, a, b, pX[10], S43, 0xffeff47d); /* 55 */
II(b, c, d, a, pX[1], S44, 0x85845dd1); /* 56 */
II(a, b, c, d, pX[8], S41, 0x6fa87e4f); /* 57 */
II(d, a, b, c, pX[15], S42, 0xfe2ce6e0); /* 58 */
II(c, d, a, b, pX[6], S43, 0xa3014314); /* 59 */
II(b, c, d, a, pX[13], S44, 0x4e0811a1); /* 60 */
II(a, b, c, d, pX[4], S41, 0xf7537e82); /* 61 */
II(d, a, b, c, pX[11], S42, 0xbd3af235); /* 62 */
II(c, d, a, b, pX[2], S43, 0x2ad7d2bb); /* 63 */
II(b, c, d, a, pX[9], S44, 0xeb86d391); /* 64 */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
}
memcpy(pOutHash, state, 16);
}
// **************************************************************************************
// **** DO NOT USE THESE ROUTINES TO PROVIDE FUNCTIONALITY THAT NEEDS TO BE
// SECURE!!! ***
// **************************************************************************************
void ComputeHashDebug(const BYTE *pData, UINT byteCount, BYTE *pOutHash) {
UINT leftOver = byteCount & 0x3f;
UINT padAmount;
bool bTwoRowsPadding = false;
if (leftOver < 56) {
padAmount = 56 - leftOver;
} else {
padAmount = 120 - leftOver;
bTwoRowsPadding = true;
}
UINT padAmountPlusSize = padAmount + 8;
UINT state[4] = {0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476};
UINT N = (byteCount + padAmountPlusSize) >> 6;
UINT offset = 0;
UINT NextEndState = bTwoRowsPadding ? N - 2 : N - 1;
const BYTE *pCurrData = pData;
for (UINT i = 0; i < N; i++, offset += 64, pCurrData += 64) {
UINT x[16];
const UINT *pX;
if (i == NextEndState) {
if (!bTwoRowsPadding && i == N - 1) {
UINT remainder = byteCount - offset;
x[0] = byteCount << 4 | 0xf;
assert(byteCount - offset <= byteCount); // check for underflow
assert(pCurrData + remainder == pData + byteCount);
memcpy((BYTE *)x + 4, pCurrData, remainder); // could copy nothing
memcpy((BYTE *)x + 4 + remainder, padding, padAmount);
x[15] = (byteCount << 2) | 0x10000000;
} else if (bTwoRowsPadding) {
if (i == N - 2) {
UINT remainder = byteCount - offset;
assert(byteCount - offset <= byteCount); // check for underflow
assert(pCurrData + remainder == pData + byteCount);
memcpy(x, pCurrData, remainder);
memcpy((BYTE *)x + remainder, padding, padAmount - 56);
NextEndState = N - 1;
} else if (i == N - 1) {
x[0] = byteCount << 4 | 0xf;
memcpy((BYTE *)x + 4, padding + padAmount - 56, 56);
x[15] = (byteCount << 2) | 0x10000000;
}
}
pX = x;
} else {
assert(pCurrData + 64 <= pData + byteCount);
pX = (const UINT *)pCurrData;
}
UINT a = state[0];
UINT b = state[1];
UINT c = state[2];
UINT d = state[3];
/* Round 1 */
FF(a, b, c, d, pX[0], S11, 0xd76aa478); /* 1 */
FF(d, a, b, c, pX[1], S12, 0xe8c7b756); /* 2 */
FF(c, d, a, b, pX[2], S13, 0x242070db); /* 3 */
FF(b, c, d, a, pX[3], S14, 0xc1bdceee); /* 4 */
FF(a, b, c, d, pX[4], S11, 0xf57c0faf); /* 5 */
FF(d, a, b, c, pX[5], S12, 0x4787c62a); /* 6 */
FF(c, d, a, b, pX[6], S13, 0xa8304613); /* 7 */
FF(b, c, d, a, pX[7], S14, 0xfd469501); /* 8 */
FF(a, b, c, d, pX[8], S11, 0x698098d8); /* 9 */
FF(d, a, b, c, pX[9], S12, 0x8b44f7af); /* 10 */
FF(c, d, a, b, pX[10], S13, 0xffff5bb1); /* 11 */
FF(b, c, d, a, pX[11], S14, 0x895cd7be); /* 12 */
FF(a, b, c, d, pX[12], S11, 0x6b901122); /* 13 */
FF(d, a, b, c, pX[13], S12, 0xfd987193); /* 14 */
FF(c, d, a, b, pX[14], S13, 0xa679438e); /* 15 */
FF(b, c, d, a, pX[15], S14, 0x49b40821); /* 16 */
/* Round 2 */
GG(a, b, c, d, pX[1], S21, 0xf61e2562); /* 17 */
GG(d, a, b, c, pX[6], S22, 0xc040b340); /* 18 */
GG(c, d, a, b, pX[11], S23, 0x265e5a51); /* 19 */
GG(b, c, d, a, pX[0], S24, 0xe9b6c7aa); /* 20 */
GG(a, b, c, d, pX[5], S21, 0xd62f105d); /* 21 */
GG(d, a, b, c, pX[10], S22, 0x2441453); /* 22 */
GG(c, d, a, b, pX[15], S23, 0xd8a1e681); /* 23 */
GG(b, c, d, a, pX[4], S24, 0xe7d3fbc8); /* 24 */
GG(a, b, c, d, pX[9], S21, 0x21e1cde6); /* 25 */
GG(d, a, b, c, pX[14], S22, 0xc33707d6); /* 26 */
GG(c, d, a, b, pX[3], S23, 0xf4d50d87); /* 27 */
GG(b, c, d, a, pX[8], S24, 0x455a14ed); /* 28 */
GG(a, b, c, d, pX[13], S21, 0xa9e3e905); /* 29 */
GG(d, a, b, c, pX[2], S22, 0xfcefa3f8); /* 30 */
GG(c, d, a, b, pX[7], S23, 0x676f02d9); /* 31 */
GG(b, c, d, a, pX[12], S24, 0x8d2a4c8a); /* 32 */
/* Round 3 */
HH(a, b, c, d, pX[5], S31, 0xfffa3942); /* 33 */
HH(d, a, b, c, pX[8], S32, 0x8771f681); /* 34 */
HH(c, d, a, b, pX[11], S33, 0x6d9d6122); /* 35 */
HH(b, c, d, a, pX[14], S34, 0xfde5380c); /* 36 */
HH(a, b, c, d, pX[1], S31, 0xa4beea44); /* 37 */
HH(d, a, b, c, pX[4], S32, 0x4bdecfa9); /* 38 */
HH(c, d, a, b, pX[7], S33, 0xf6bb4b60); /* 39 */
HH(b, c, d, a, pX[10], S34, 0xbebfbc70); /* 40 */
HH(a, b, c, d, pX[13], S31, 0x289b7ec6); /* 41 */
HH(d, a, b, c, pX[0], S32, 0xeaa127fa); /* 42 */
HH(c, d, a, b, pX[3], S33, 0xd4ef3085); /* 43 */
HH(b, c, d, a, pX[6], S34, 0x4881d05); /* 44 */
HH(a, b, c, d, pX[9], S31, 0xd9d4d039); /* 45 */
HH(d, a, b, c, pX[12], S32, 0xe6db99e5); /* 46 */
HH(c, d, a, b, pX[15], S33, 0x1fa27cf8); /* 47 */
HH(b, c, d, a, pX[2], S34, 0xc4ac5665); /* 48 */
/* Round 4 */
II(a, b, c, d, pX[0], S41, 0xf4292244); /* 49 */
II(d, a, b, c, pX[7], S42, 0x432aff97); /* 50 */
II(c, d, a, b, pX[14], S43, 0xab9423a7); /* 51 */
II(b, c, d, a, pX[5], S44, 0xfc93a039); /* 52 */
II(a, b, c, d, pX[12], S41, 0x655b59c3); /* 53 */
II(d, a, b, c, pX[3], S42, 0x8f0ccc92); /* 54 */
II(c, d, a, b, pX[10], S43, 0xffeff47d); /* 55 */
II(b, c, d, a, pX[1], S44, 0x85845dd1); /* 56 */
II(a, b, c, d, pX[8], S41, 0x6fa87e4f); /* 57 */
II(d, a, b, c, pX[15], S42, 0xfe2ce6e0); /* 58 */
II(c, d, a, b, pX[6], S43, 0xa3014314); /* 59 */
II(b, c, d, a, pX[13], S44, 0x4e0811a1); /* 60 */
II(a, b, c, d, pX[4], S41, 0xf7537e82); /* 61 */
II(d, a, b, c, pX[11], S42, 0xbd3af235); /* 62 */
II(c, d, a, b, pX[2], S43, 0x2ad7d2bb); /* 63 */
II(b, c, d, a, pX[9], S44, 0xeb86d391); /* 64 */
state[0] += a;
state[1] += b;
state[2] += c;
state[3] += d;
}
memcpy(pOutHash, state, 16);
}
// **************************************************************************************
// **** DO NOT USE THESE ROUTINES TO PROVIDE FUNCTIONALITY THAT NEEDS TO BE
// SECURE!!! ***
// **************************************************************************************
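// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of hashing an in-memory DXIL container blob with the
// retail variant above; the 16-byte digest is the four UINT state words
// copied out by the final memcpy.
static void HashContainerExample(const BYTE *pContainer, UINT byteCount) {
  BYTE digest[16];
  ComputeHashRetail(pContainer, byteCount, digest);
  // digest now holds the (non-cryptographic) modified-MD5 checksum; per the
  // banner above, never use it where real security is required.
}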
==== FILE: repos/DirectXShaderCompiler/lib/ExecutionEngine/ExecutionEngineBindings.cpp ====
//===-- ExecutionEngineBindings.cpp - C bindings for EEs ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the C bindings for the ExecutionEngine library.
//
//===----------------------------------------------------------------------===//
#include "llvm-c/ExecutionEngine.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetOptions.h"
#include <cstring>
using namespace llvm;
#define DEBUG_TYPE "jit"
// Wrapping the C bindings types.
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
inline LLVMTargetMachineRef wrap(const TargetMachine *P) {
return
reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P));
}
/*===-- Operations on generic values --------------------------------------===*/
LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
unsigned long long N,
LLVMBool IsSigned) {
GenericValue *GenVal = new GenericValue();
GenVal->IntVal = APInt(unwrap<IntegerType>(Ty)->getBitWidth(), N, IsSigned);
return wrap(GenVal);
}
LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P) {
GenericValue *GenVal = new GenericValue();
GenVal->PointerVal = P;
return wrap(GenVal);
}
LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef TyRef, double N) {
GenericValue *GenVal = new GenericValue();
switch (unwrap(TyRef)->getTypeID()) {
case Type::FloatTyID:
GenVal->FloatVal = N;
break;
case Type::DoubleTyID:
GenVal->DoubleVal = N;
break;
default:
llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
}
return wrap(GenVal);
}
unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef) {
return unwrap(GenValRef)->IntVal.getBitWidth();
}
unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenValRef,
LLVMBool IsSigned) {
GenericValue *GenVal = unwrap(GenValRef);
if (IsSigned)
return GenVal->IntVal.getSExtValue();
else
return GenVal->IntVal.getZExtValue();
}
void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal) {
return unwrap(GenVal)->PointerVal;
}
double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) {
switch (unwrap(TyRef)->getTypeID()) {
case Type::FloatTyID:
return unwrap(GenVal)->FloatVal;
case Type::DoubleTyID:
return unwrap(GenVal)->DoubleVal;
default:
llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
}
}
void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) {
delete unwrap(GenVal);
}
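// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of round-tripping a 32-bit integer through a GenericValue
// via the C API above; LLVMInt32Type() comes from llvm-c/Core.h.
static void GenericValueRoundTrip() {
  LLVMGenericValueRef GV =
      LLVMCreateGenericValueOfInt(LLVMInt32Type(), 42, /*IsSigned=*/1);
  unsigned long long N = LLVMGenericValueToInt(GV, /*IsSigned=*/1); // == 42
  (void)N;
  LLVMDisposeGenericValue(GV); // frees the heap-allocated GenericValue
}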
/*===-- Operations on execution engines -----------------------------------===*/
LLVMBool LLVMCreateExecutionEngineForModule(LLVMExecutionEngineRef *OutEE,
LLVMModuleRef M,
char **OutError) {
std::string Error;
EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
builder.setEngineKind(EngineKind::Either)
.setErrorStr(&Error);
if (ExecutionEngine *EE = builder.create()){
*OutEE = wrap(EE);
return 0;
}
*OutError = strdup(Error.c_str());
return 1;
}
LLVMBool LLVMCreateInterpreterForModule(LLVMExecutionEngineRef *OutInterp,
LLVMModuleRef M,
char **OutError) {
std::string Error;
EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
builder.setEngineKind(EngineKind::Interpreter)
.setErrorStr(&Error);
if (ExecutionEngine *Interp = builder.create()) {
*OutInterp = wrap(Interp);
return 0;
}
*OutError = strdup(Error.c_str());
return 1;
}
LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
LLVMModuleRef M,
unsigned OptLevel,
char **OutError) {
std::string Error;
EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
builder.setEngineKind(EngineKind::JIT)
.setErrorStr(&Error)
.setOptLevel((CodeGenOpt::Level)OptLevel);
if (ExecutionEngine *JIT = builder.create()) {
*OutJIT = wrap(JIT);
return 0;
}
*OutError = strdup(Error.c_str());
return 1;
}
void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions,
size_t SizeOfPassedOptions) {
LLVMMCJITCompilerOptions options;
memset(&options, 0, sizeof(options)); // Most fields are zero by default.
options.CodeModel = LLVMCodeModelJITDefault;
memcpy(PassedOptions, &options,
std::min(sizeof(options), SizeOfPassedOptions));
}
LLVMBool LLVMCreateMCJITCompilerForModule(
LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
LLVMMCJITCompilerOptions *PassedOptions, size_t SizeOfPassedOptions,
char **OutError) {
LLVMMCJITCompilerOptions options;
// If the user passed a larger sized options struct, then they were compiled
// against a newer LLVM. Tell them that something is wrong.
if (SizeOfPassedOptions > sizeof(options)) {
*OutError = strdup(
"Refusing to use options struct that is larger than my own; assuming "
"LLVM library mismatch.");
return 1;
}
// Defend against the user having an old version of the API by ensuring that
// any fields they didn't see are cleared. We must defend against fields being
// set to the bitwise equivalent of zero, and assume that this means "do the
// default" as if that option hadn't been available.
LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
memcpy(&options, PassedOptions, SizeOfPassedOptions);
TargetOptions targetOptions;
targetOptions.EnableFastISel = options.EnableFastISel;
std::unique_ptr<Module> Mod(unwrap(M));
if (Mod)
// Set function attribute "no-frame-pointer-elim" based on
// NoFramePointerElim.
for (auto &F : *Mod) {
auto Attrs = F.getAttributes();
auto Value = options.NoFramePointerElim ? "true" : "false";
Attrs = Attrs.addAttribute(F.getContext(), AttributeSet::FunctionIndex,
"no-frame-pointer-elim", Value);
F.setAttributes(Attrs);
}
std::string Error;
EngineBuilder builder(std::move(Mod));
builder.setEngineKind(EngineKind::JIT)
.setErrorStr(&Error)
.setOptLevel((CodeGenOpt::Level)options.OptLevel)
.setCodeModel(unwrap(options.CodeModel))
.setTargetOptions(targetOptions);
if (options.MCJMM)
builder.setMCJITMemoryManager(
std::unique_ptr<RTDyldMemoryManager>(unwrap(options.MCJMM)));
if (ExecutionEngine *JIT = builder.create()) {
*OutJIT = wrap(JIT);
return 0;
}
*OutError = strdup(Error.c_str());
return 1;
}
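// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of creating an MCJIT engine through the C API. Initialize
// the options struct first so fields unseen by older clients get defaults;
// on failure the returned error string must be free()d by the caller.
static LLVMExecutionEngineRef CreateMCJITOrNull(LLVMModuleRef M) {
  LLVMMCJITCompilerOptions Options;
  LLVMInitializeMCJITCompilerOptions(&Options, sizeof(Options));
  Options.OptLevel = 2; // -O2-style codegen
  LLVMExecutionEngineRef EE;
  char *Err = nullptr;
  if (LLVMCreateMCJITCompilerForModule(&EE, M, &Options, sizeof(Options),
                                       &Err)) {
    fprintf(stderr, "MCJIT creation failed: %s\n", Err); // needs <cstdio>
    free(Err);                                           // needs <cstdlib>
    return nullptr;
  }
  return EE; // the engine now owns the module M
}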
LLVMBool LLVMCreateExecutionEngine(LLVMExecutionEngineRef *OutEE,
LLVMModuleProviderRef MP,
char **OutError) {
/* The module provider is now actually a module. */
return LLVMCreateExecutionEngineForModule(OutEE,
reinterpret_cast<LLVMModuleRef>(MP),
OutError);
}
LLVMBool LLVMCreateInterpreter(LLVMExecutionEngineRef *OutInterp,
LLVMModuleProviderRef MP,
char **OutError) {
/* The module provider is now actually a module. */
return LLVMCreateInterpreterForModule(OutInterp,
reinterpret_cast<LLVMModuleRef>(MP),
OutError);
}
LLVMBool LLVMCreateJITCompiler(LLVMExecutionEngineRef *OutJIT,
LLVMModuleProviderRef MP,
unsigned OptLevel,
char **OutError) {
/* The module provider is now actually a module. */
return LLVMCreateJITCompilerForModule(OutJIT,
reinterpret_cast<LLVMModuleRef>(MP),
OptLevel, OutError);
}
void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE) {
delete unwrap(EE);
}
void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE) {
unwrap(EE)->runStaticConstructorsDestructors(false);
}
void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) {
unwrap(EE)->runStaticConstructorsDestructors(true);
}
int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
unsigned ArgC, const char * const *ArgV,
const char * const *EnvP) {
unwrap(EE)->finalizeObject();
std::vector<std::string> ArgVec(ArgV, ArgV + ArgC);
return unwrap(EE)->runFunctionAsMain(unwrap<Function>(F), ArgVec, EnvP);
}
LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
unsigned NumArgs,
LLVMGenericValueRef *Args) {
unwrap(EE)->finalizeObject();
std::vector<GenericValue> ArgVec;
ArgVec.reserve(NumArgs);
for (unsigned I = 0; I != NumArgs; ++I)
ArgVec.push_back(*unwrap(Args[I]));
GenericValue *Result = new GenericValue();
*Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
return wrap(Result);
}
void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) {
}
void LLVMAddModule(LLVMExecutionEngineRef EE, LLVMModuleRef M){
unwrap(EE)->addModule(std::unique_ptr<Module>(unwrap(M)));
}
void LLVMAddModuleProvider(LLVMExecutionEngineRef EE, LLVMModuleProviderRef MP){
/* The module provider is now actually a module. */
LLVMAddModule(EE, reinterpret_cast<LLVMModuleRef>(MP));
}
LLVMBool LLVMRemoveModule(LLVMExecutionEngineRef EE, LLVMModuleRef M,
LLVMModuleRef *OutMod, char **OutError) {
Module *Mod = unwrap(M);
unwrap(EE)->removeModule(Mod);
*OutMod = wrap(Mod);
return 0;
}
LLVMBool LLVMRemoveModuleProvider(LLVMExecutionEngineRef EE,
LLVMModuleProviderRef MP,
LLVMModuleRef *OutMod, char **OutError) {
/* The module provider is now actually a module. */
return LLVMRemoveModule(EE, reinterpret_cast<LLVMModuleRef>(MP), OutMod,
OutError);
}
LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
LLVMValueRef *OutFn) {
if (Function *F = unwrap(EE)->FindFunctionNamed(Name)) {
*OutFn = wrap(F);
return 0;
}
return 1;
}
void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
LLVMValueRef Fn) {
return nullptr;
}
LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
return wrap(unwrap(EE)->getDataLayout());
}
LLVMTargetMachineRef
LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE) {
return wrap(unwrap(EE)->getTargetMachine());
}
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
void* Addr) {
unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
}
void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
unwrap(EE)->finalizeObject();
return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
}
uint64_t LLVMGetGlobalValueAddress(LLVMExecutionEngineRef EE, const char *Name) {
return unwrap(EE)->getGlobalValueAddress(Name);
}
uint64_t LLVMGetFunctionAddress(LLVMExecutionEngineRef EE, const char *Name) {
return unwrap(EE)->getFunctionAddress(Name);
}
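// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of resolving a JITed function by name and calling it via a
// plain function pointer; "sum" is a hypothetical symbol assumed to exist in
// a module already added to (and finalized in) the engine.
static int CallJitedSum(LLVMExecutionEngineRef EE) {
  uint64_t Addr = LLVMGetFunctionAddress(EE, "sum");
  if (!Addr)
    return -1; // symbol not found
  typedef int (*SumFn)(int, int);
  SumFn Sum = reinterpret_cast<SumFn>(static_cast<uintptr_t>(Addr));
  return Sum(1, 2); // executes freshly JITed code
}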
/*===-- Operations on memory managers -------------------------------------===*/
namespace {
struct SimpleBindingMMFunctions {
LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection;
LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection;
LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory;
LLVMMemoryManagerDestroyCallback Destroy;
};
class SimpleBindingMemoryManager : public RTDyldMemoryManager {
public:
SimpleBindingMemoryManager(const SimpleBindingMMFunctions& Functions,
void *Opaque);
~SimpleBindingMemoryManager() override;
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,
StringRef SectionName) override;
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID, StringRef SectionName,
bool isReadOnly) override;
bool finalizeMemory(std::string *ErrMsg) override;
private:
SimpleBindingMMFunctions Functions;
void *Opaque;
};
SimpleBindingMemoryManager::SimpleBindingMemoryManager(
const SimpleBindingMMFunctions& Functions,
void *Opaque)
: Functions(Functions), Opaque(Opaque) {
assert(Functions.AllocateCodeSection &&
"No AllocateCodeSection function provided!");
assert(Functions.AllocateDataSection &&
"No AllocateDataSection function provided!");
assert(Functions.FinalizeMemory &&
"No FinalizeMemory function provided!");
assert(Functions.Destroy &&
"No Destroy function provided!");
}
SimpleBindingMemoryManager::~SimpleBindingMemoryManager() {
Functions.Destroy(Opaque);
}
uint8_t *SimpleBindingMemoryManager::allocateCodeSection(
uintptr_t Size, unsigned Alignment, unsigned SectionID,
StringRef SectionName) {
return Functions.AllocateCodeSection(Opaque, Size, Alignment, SectionID,
SectionName.str().c_str());
}
uint8_t *SimpleBindingMemoryManager::allocateDataSection(
uintptr_t Size, unsigned Alignment, unsigned SectionID,
StringRef SectionName, bool isReadOnly) {
return Functions.AllocateDataSection(Opaque, Size, Alignment, SectionID,
SectionName.str().c_str(),
isReadOnly);
}
bool SimpleBindingMemoryManager::finalizeMemory(std::string *ErrMsg) {
char *errMsgCString = nullptr;
bool result = Functions.FinalizeMemory(Opaque, &errMsgCString);
assert((result || !errMsgCString) &&
"Did not expect an error message if FinalizeMemory succeeded");
if (errMsgCString) {
if (ErrMsg)
*ErrMsg = errMsgCString;
free(errMsgCString);
}
return result;
}
} // anonymous namespace
LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
void *Opaque,
LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
LLVMMemoryManagerDestroyCallback Destroy) {
if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
!Destroy)
return nullptr;
SimpleBindingMMFunctions functions;
functions.AllocateCodeSection = AllocateCodeSection;
functions.AllocateDataSection = AllocateDataSection;
functions.FinalizeMemory = FinalizeMemory;
functions.Destroy = Destroy;
return wrap(new SimpleBindingMemoryManager(functions, Opaque));
}
void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM) {
delete unwrap(MM);
}
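// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of the four required callbacks for
// LLVMCreateSimpleMCJITMemoryManager. The malloc-backed allocators are
// illustrative only: real code sections need pages that can be made
// executable, which plain malloc (needs <cstdlib>) does not guarantee.
static uint8_t *ExAllocCode(void *Opaque, uintptr_t Size, unsigned Alignment,
                            unsigned SectionID, const char *SectionName) {
  (void)Opaque; (void)Alignment; (void)SectionID; (void)SectionName;
  return static_cast<uint8_t *>(malloc(Size));
}
static uint8_t *ExAllocData(void *Opaque, uintptr_t Size, unsigned Alignment,
                            unsigned SectionID, const char *SectionName,
                            LLVMBool IsReadOnly) {
  (void)IsReadOnly;
  return ExAllocCode(Opaque, Size, Alignment, SectionID, SectionName);
}
static LLVMBool ExFinalize(void *Opaque, char **ErrMsg) {
  (void)Opaque; (void)ErrMsg;
  return 0; // report success; a real manager would set page permissions here
}
static void ExDestroy(void *Opaque) { (void)Opaque; }
// All four callbacks are mandatory; passing any null pointer makes the
// factory above return nullptr:
//   LLVMMCJITMemoryManagerRef MM = LLVMCreateSimpleMCJITMemoryManager(
//       nullptr, ExAllocCode, ExAllocData, ExFinalize, ExDestroy);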
==== FILE: repos/DirectXShaderCompiler/lib/ExecutionEngine/TargetSelect.cpp ====
//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This just asks the TargetRegistry for the appropriate target to use, and
// allows the user to specify a specific one on the commandline with -march=x,
// -mcpu=y, and -mattr=a,-b,+c. Clients should initialize targets prior to
// calling selectTarget().
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
TargetMachine *EngineBuilder::selectTarget() {
Triple TT;
// MCJIT can generate code for remote targets, but the old JIT and Interpreter
// must use the host architecture.
if (WhichEngine != EngineKind::Interpreter && M)
TT.setTriple(M->getTargetTriple());
return selectTarget(TT, MArch, MCPU, MAttrs);
}
/// selectTarget - Pick a target either via -march or by guessing the native
/// arch. Add any CPU features specified via -mcpu or -mattr.
TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs) {
Triple TheTriple(TargetTriple);
if (TheTriple.getTriple().empty())
TheTriple.setTriple(sys::getProcessTriple());
// Adjust the triple to match what the user requested.
const Target *TheTarget = nullptr;
if (!MArch.empty()) {
auto I = std::find_if(
TargetRegistry::targets().begin(), TargetRegistry::targets().end(),
[&](const Target &T) { return MArch == T.getName(); });
if (I == TargetRegistry::targets().end()) {
if (ErrorStr)
*ErrorStr = "No available targets are compatible with this -march, "
"see -version for the available targets.\n";
return nullptr;
}
TheTarget = &*I;
// Adjust the triple to match (if known), otherwise stick with the
// requested/host triple.
Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
if (Type != Triple::UnknownArch)
TheTriple.setArch(Type);
} else {
std::string Error;
TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
if (!TheTarget) {
if (ErrorStr)
*ErrorStr = Error;
return nullptr;
}
}
// Package up features to be passed to target/subtarget
std::string FeaturesStr;
if (!MAttrs.empty()) {
SubtargetFeatures Features;
for (unsigned i = 0; i != MAttrs.size(); ++i)
Features.AddFeature(MAttrs[i]);
FeaturesStr = Features.getString();
}
// FIXME: non-iOS ARM FastISel is broken with MCJIT.
if (TheTriple.getArch() == Triple::arm &&
!TheTriple.isiOS() &&
OptLevel == CodeGenOpt::None) {
OptLevel = CodeGenOpt::Less;
}
// Allocate a target...
TargetMachine *Target = TheTarget->createTargetMachine(TheTriple.getTriple(),
MCPU, FeaturesStr,
Options,
RelocModel, CMModel,
OptLevel);
assert(Target && "Could not allocate target machine!");
return Target;
}
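// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of picking the host target. Per the header comment,
// clients must initialize targets first; InitializeNativeTarget() is assumed
// to come from llvm/Support/TargetSelect.h, which this file does not include.
static TargetMachine *SelectHostTarget() {
  InitializeNativeTarget(); // registers the host's target and MC layer
  return EngineBuilder().selectTarget(); // host triple, default CPU/attrs
}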
==== FILE: repos/DirectXShaderCompiler/lib/ExecutionEngine/SectionMemoryManager.cpp ====
//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the section-based memory manager used by the MCJIT
// execution engine and RuntimeDyld
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/Support/MathExtras.h"
namespace llvm {
uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID,
StringRef SectionName,
bool IsReadOnly) {
if (IsReadOnly)
return allocateSection(RODataMem, Size, Alignment);
return allocateSection(RWDataMem, Size, Alignment);
}
uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
unsigned Alignment,
unsigned SectionID,
StringRef SectionName) {
return allocateSection(CodeMem, Size, Alignment);
}
uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
uintptr_t Size,
unsigned Alignment) {
if (!Alignment)
Alignment = 16;
assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1)/Alignment + 1);
uintptr_t Addr = 0;
// Look in the list of free memory regions and use a block there if one
// is available.
for (int i = 0, e = MemGroup.FreeMem.size(); i != e; ++i) {
sys::MemoryBlock &MB = MemGroup.FreeMem[i];
if (MB.size() >= RequiredSize) {
Addr = (uintptr_t)MB.base();
uintptr_t EndOfBlock = Addr + MB.size();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
// Store the remaining tail as a smaller free memory block.
MemGroup.FreeMem[i] = sys::MemoryBlock((void*)(Addr + Size),
EndOfBlock - Addr - Size);
return (uint8_t*)Addr;
}
}
// No pre-allocated free block was large enough. Allocate a new memory region.
// Note that all sections get allocated as read-write. The permissions will
// be updated later based on memory group.
//
// FIXME: It would be useful to define a default allocation size (or add
// it as a constructor parameter) to minimize the number of allocations.
//
// FIXME: Initialize the Near member for each memory group to avoid
// interleaving.
std::error_code ec;
sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(RequiredSize,
&MemGroup.Near,
sys::Memory::MF_READ |
sys::Memory::MF_WRITE,
ec);
if (ec) {
// FIXME: Add error propagation to the interface.
return nullptr;
}
// Save this address as the basis for our next request
MemGroup.Near = MB;
MemGroup.AllocatedMem.push_back(MB);
Addr = (uintptr_t)MB.base();
uintptr_t EndOfBlock = Addr + MB.size();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
// allocateMappedMemory may allocate much more memory than we need. In
// this case, we store the unused memory as a free memory block.
unsigned FreeSize = EndOfBlock-Addr-Size;
if (FreeSize > 16)
MemGroup.FreeMem.push_back(sys::MemoryBlock((void*)(Addr + Size), FreeSize));
// Return aligned address
return (uint8_t*)Addr;
}
bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg)
{
// FIXME: Should in-progress permissions be reverted if an error occurs?
std::error_code ec;
// Don't allow free memory blocks to be used after setting protection flags.
CodeMem.FreeMem.clear();
// Make code memory executable.
ec = applyMemoryGroupPermissions(CodeMem,
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
if (ec) {
if (ErrMsg) {
*ErrMsg = ec.message();
}
return true;
}
// Don't allow free memory blocks to be used after setting protection flags.
RODataMem.FreeMem.clear();
// Make read-only data memory read-only.
ec = applyMemoryGroupPermissions(RODataMem,
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
if (ec) {
if (ErrMsg) {
*ErrMsg = ec.message();
}
return true;
}
// Read-write data memory already has the correct permissions
// Some platforms with separate data cache and instruction cache require
// explicit cache flush, otherwise JIT code manipulations (like resolved
// relocations) will get to the data cache but not to the instruction cache.
invalidateInstructionCache();
return false;
}
std::error_code
SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
unsigned Permissions) {
for (int i = 0, e = MemGroup.AllocatedMem.size(); i != e; ++i) {
std::error_code ec;
ec =
sys::Memory::protectMappedMemory(MemGroup.AllocatedMem[i], Permissions);
if (ec) {
return ec;
}
}
return std::error_code();
}
void SectionMemoryManager::invalidateInstructionCache() {
for (int i = 0, e = CodeMem.AllocatedMem.size(); i != e; ++i)
sys::Memory::InvalidateInstructionCache(CodeMem.AllocatedMem[i].base(),
CodeMem.AllocatedMem[i].size());
}
SectionMemoryManager::~SectionMemoryManager() {
for (unsigned i = 0, e = CodeMem.AllocatedMem.size(); i != e; ++i)
sys::Memory::releaseMappedMemory(CodeMem.AllocatedMem[i]);
for (unsigned i = 0, e = RWDataMem.AllocatedMem.size(); i != e; ++i)
sys::Memory::releaseMappedMemory(RWDataMem.AllocatedMem[i]);
for (unsigned i = 0, e = RODataMem.AllocatedMem.size(); i != e; ++i)
sys::Memory::releaseMappedMemory(RODataMem.AllocatedMem[i]);
}
} // namespace llvm
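// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of handing this memory manager to an EngineBuilder; the
// builder routes it to MCJIT, which calls allocate*Section() during object
// loading and finalizeMemory() before code runs. Assumes
// llvm/ExecutionEngine/ExecutionEngine.h and llvm/ADT/STLExtras.h
// (for llvm::make_unique) are also included.
static llvm::ExecutionEngine *BuildEngine(std::unique_ptr<llvm::Module> M,
                                          std::string &Err) {
  return llvm::EngineBuilder(std::move(M))
      .setErrorStr(&Err)
      .setMCJITMemoryManager(llvm::make_unique<llvm::SectionMemoryManager>())
      .create();
}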
==== FILE: repos/DirectXShaderCompiler/lib/ExecutionEngine/CMakeLists.txt ====
add_llvm_library(LLVMExecutionEngine
ExecutionEngine.cpp
ExecutionEngineBindings.cpp
GDBRegistrationListener.cpp
SectionMemoryManager.cpp
TargetSelect.cpp
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/ExecutionEngine
DEPENDS
intrinsics_gen
)
add_subdirectory(Interpreter)
# add_subdirectory(MCJIT) # HLSL Change
# add_subdirectory(Orc) # HLSL Change
# add_subdirectory(RuntimeDyld) # HLSL Change
if( LLVM_USE_OPROFILE )
add_subdirectory(OProfileJIT)
endif( LLVM_USE_OPROFILE )
if( LLVM_USE_INTEL_JITEVENTS )
add_subdirectory(IntelJITEvents)
endif( LLVM_USE_INTEL_JITEVENTS )
==== FILE: repos/DirectXShaderCompiler/lib/ExecutionEngine/LLVMBuild.txt ====
;===- ./lib/ExecutionEngine/LLVMBuild.txt ----------------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[common]
subdirectories = Interpreter MCJIT RuntimeDyld IntelJITEvents OProfileJIT Orc
[component_0]
type = Library
name = ExecutionEngine
parent = Libraries
required_libraries = Core MC Object RuntimeDyld Support Target
==== FILE: repos/DirectXShaderCompiler/lib/ExecutionEngine/ExecutionEngine.cpp ====
//===-- ExecutionEngine.cpp - Common Implementation shared by EEs ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the common interface used by the various execution engine
// subclasses.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cmath>
#include <cstring>
using namespace llvm;
#define DEBUG_TYPE "jit"
STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
STATISTIC(NumGlobals , "Number of global vars initialized");
ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
std::unique_ptr<Module> M, std::string *ErrorStr,
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver,
std::unique_ptr<TargetMachine> TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::OrcMCJITReplacementCtor)(
std::string *ErrorStr, std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver,
std::unique_ptr<TargetMachine> TM) = nullptr;
ExecutionEngine *(*ExecutionEngine::InterpCtor)(std::unique_ptr<Module> M,
std::string *ErrorStr) = nullptr;
void JITEventListener::anchor() {}
ExecutionEngine::ExecutionEngine(std::unique_ptr<Module> M)
: LazyFunctionCreator(nullptr) {
CompilingLazily = false;
GVCompilationDisabled = false;
SymbolSearchingDisabled = false;
// IR module verification is enabled by default in debug builds, and disabled
// by default in release builds.
#ifndef NDEBUG
VerifyModules = true;
#else
VerifyModules = false;
#endif
assert(M && "Module is null?");
Modules.push_back(std::move(M));
}
ExecutionEngine::~ExecutionEngine() {
clearAllGlobalMappings();
}
namespace {
/// \brief Helper class which uses a value handler to automatically delete the
/// memory block when the GlobalVariable is destroyed.
class GVMemoryBlock : public CallbackVH {
GVMemoryBlock(const GlobalVariable *GV)
: CallbackVH(const_cast<GlobalVariable*>(GV)) {}
public:
/// \brief Returns the address the GlobalVariable should be written into. The
/// GVMemoryBlock object prefixes that.
static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
void *RawMemory = ::operator new(
RoundUpToAlignment(sizeof(GVMemoryBlock),
TD.getPreferredAlignment(GV))
+ GVSize);
new(RawMemory) GVMemoryBlock(GV);
return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
}
void deleted() override {
// We allocated with operator new and with some extra memory hanging off the
// end, so don't just delete this. I'm not sure if this is actually
// required.
this->~GVMemoryBlock();
::operator delete(this);
}
};
} // anonymous namespace
char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
return GVMemoryBlock::Create(GV, *getDataLayout());
}
void ExecutionEngine::addObjectFile(std::unique_ptr<object::ObjectFile> O) {
llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
}
void
ExecutionEngine::addObjectFile(object::OwningBinary<object::ObjectFile> O) {
llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
}
void ExecutionEngine::addArchive(object::OwningBinary<object::Archive> A) {
llvm_unreachable("ExecutionEngine subclass doesn't implement addArchive.");
}
bool ExecutionEngine::removeModule(Module *M) {
for (auto I = Modules.begin(), E = Modules.end(); I != E; ++I) {
Module *Found = I->get();
if (Found == M) {
I->release();
Modules.erase(I);
clearGlobalMappingsFromModule(M);
return true;
}
}
return false;
}
Function *ExecutionEngine::FindFunctionNamed(const char *FnName) {
for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
Function *F = Modules[i]->getFunction(FnName);
if (F && !F->isDeclaration())
return F;
}
return nullptr;
}
GlobalVariable *ExecutionEngine::FindGlobalVariableNamed(const char *Name, bool AllowInternal) {
for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
GlobalVariable *GV = Modules[i]->getGlobalVariable(Name,AllowInternal);
if (GV && !GV->isDeclaration())
return GV;
}
return nullptr;
}
uint64_t ExecutionEngineState::RemoveMapping(StringRef Name) {
GlobalAddressMapTy::iterator I = GlobalAddressMap.find(Name);
uint64_t OldVal;
// FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
// GlobalAddressMap.
if (I == GlobalAddressMap.end())
OldVal = 0;
else {
GlobalAddressReverseMap.erase(I->second);
OldVal = I->second;
GlobalAddressMap.erase(I);
}
return OldVal;
}
std::string ExecutionEngine::getMangledName(const GlobalValue *GV) {
assert(GV->hasName() && "Global must have name.");
MutexGuard locked(lock);
SmallString<128> FullName;
const DataLayout &DL =
GV->getParent()->getDataLayout().isDefault()
? *getDataLayout()
: GV->getParent()->getDataLayout();
Mangler::getNameWithPrefix(FullName, GV->getName(), DL);
return FullName.str();
}
void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
MutexGuard locked(lock);
addGlobalMapping(getMangledName(GV), (uint64_t) Addr);
}
void ExecutionEngine::addGlobalMapping(StringRef Name, uint64_t Addr) {
MutexGuard locked(lock);
assert(!Name.empty() && "Empty GlobalMapping symbol name!");
DEBUG(dbgs() << "JIT: Map \'" << Name << "\' to [" << Addr << "]\n";);
uint64_t &CurVal = EEState.getGlobalAddressMap()[Name];
assert((!CurVal || !Addr) && "GlobalMapping already established!");
CurVal = Addr;
// If we are using the reverse mapping, add it too.
if (!EEState.getGlobalAddressReverseMap().empty()) {
std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
assert((!V.empty() || !Name.empty()) &&
"GlobalMapping already established!");
V = Name;
}
}
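// --- Editor's usage sketch (not part of the original file) -----------------
// A minimal sketch of mapping a declared IR function onto an in-process host
// function so JITed calls land in the host; HostCallback is a hypothetical
// function whose signature is assumed to match the IR declaration.
extern "C" int HostCallback(int X) { return X + 1; }
static void MapHostFunction(ExecutionEngine &EE, Function *Decl) {
  // Casting a function pointer to void* is conditionally-supported but works
  // on the platforms the JIT targets.
  EE.addGlobalMapping(Decl, reinterpret_cast<void *>(&HostCallback));
}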
void ExecutionEngine::clearAllGlobalMappings() {
MutexGuard locked(lock);
EEState.getGlobalAddressMap().clear();
EEState.getGlobalAddressReverseMap().clear();
}
void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
MutexGuard locked(lock);
for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ++FI)
EEState.RemoveMapping(getMangledName(FI));
for (Module::global_iterator GI = M->global_begin(), GE = M->global_end();
GI != GE; ++GI)
EEState.RemoveMapping(getMangledName(GI));
}
uint64_t ExecutionEngine::updateGlobalMapping(const GlobalValue *GV,
void *Addr) {
MutexGuard locked(lock);
return updateGlobalMapping(getMangledName(GV), (uint64_t) Addr);
}
uint64_t ExecutionEngine::updateGlobalMapping(StringRef Name, uint64_t Addr) {
MutexGuard locked(lock);
ExecutionEngineState::GlobalAddressMapTy &Map =
EEState.getGlobalAddressMap();
// Deleting from the mapping?
if (!Addr)
return EEState.RemoveMapping(Name);
uint64_t &CurVal = Map[Name];
uint64_t OldVal = CurVal;
if (CurVal && !EEState.getGlobalAddressReverseMap().empty())
EEState.getGlobalAddressReverseMap().erase(CurVal);
CurVal = Addr;
// If we are using the reverse mapping, add it too.
if (!EEState.getGlobalAddressReverseMap().empty()) {
std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
assert((!V.empty() || !Name.empty()) &&
"GlobalMapping already established!");
V = Name;
}
return OldVal;
}
uint64_t ExecutionEngine::getAddressToGlobalIfAvailable(StringRef S) {
MutexGuard locked(lock);
uint64_t Address = 0;
ExecutionEngineState::GlobalAddressMapTy::iterator I =
EEState.getGlobalAddressMap().find(S);
if (I != EEState.getGlobalAddressMap().end())
Address = I->second;
return Address;
}
void *ExecutionEngine::getPointerToGlobalIfAvailable(StringRef S) {
MutexGuard locked(lock);
if (void* Address = (void *) getAddressToGlobalIfAvailable(S))
return Address;
return nullptr;
}
void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
MutexGuard locked(lock);
return getPointerToGlobalIfAvailable(getMangledName(GV));
}
const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
MutexGuard locked(lock);
// If we haven't computed the reverse mapping yet, do so first.
if (EEState.getGlobalAddressReverseMap().empty()) {
for (ExecutionEngineState::GlobalAddressMapTy::iterator
I = EEState.getGlobalAddressMap().begin(),
E = EEState.getGlobalAddressMap().end(); I != E; ++I) {
StringRef Name = I->first();
uint64_t Addr = I->second;
EEState.getGlobalAddressReverseMap().insert(std::make_pair(
Addr, Name));
}
}
std::map<uint64_t, std::string>::iterator I =
EEState.getGlobalAddressReverseMap().find((uint64_t) Addr);
if (I != EEState.getGlobalAddressReverseMap().end()) {
StringRef Name = I->second;
for (unsigned i = 0, e = Modules.size(); i != e; ++i)
if (GlobalValue *GV = Modules[i]->getNamedValue(Name))
return GV;
}
return nullptr;
}
namespace {
class ArgvArray {
std::unique_ptr<char[]> Array;
std::vector<std::unique_ptr<char[]>> Values;
public:
/// Turn a vector of strings into a nice argv style array of pointers to null
/// terminated strings.
void *reset(LLVMContext &C, ExecutionEngine *EE,
const std::vector<std::string> &InputArgv);
};
} // anonymous namespace
void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
const std::vector<std::string> &InputArgv) {
Values.clear(); // Free the old contents.
Values.reserve(InputArgv.size());
unsigned PtrSize = EE->getDataLayout()->getPointerSize();
Array = make_unique<char[]>((InputArgv.size()+1)*PtrSize);
DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array.get() << "\n");
Type *SBytePtr = Type::getInt8PtrTy(C);
for (unsigned i = 0; i != InputArgv.size(); ++i) {
unsigned Size = InputArgv[i].size()+1;
auto Dest = make_unique<char[]>(Size);
DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void*)Dest.get() << "\n");
std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest.get());
Dest[Size-1] = 0;
// Endian safe: Array[i] = (PointerTy)Dest;
EE->StoreValueToMemory(PTOGV(Dest.get()),
(GenericValue*)(&Array[i*PtrSize]), SBytePtr);
Values.push_back(std::move(Dest));
}
// Null terminate it
EE->StoreValueToMemory(PTOGV(nullptr),
(GenericValue*)(&Array[InputArgv.size()*PtrSize]),
SBytePtr);
return Array.get();
}
void ExecutionEngine::runStaticConstructorsDestructors(Module &mod,
bool isDtors) {
const char *Name = isDtors ? "llvm.global_dtors" : "llvm.global_ctors";
GlobalVariable *GV = mod.getNamedGlobal(Name);
// If this global has internal linkage, or if it has a use, then it must be
// an old-style (llvmgcc3) static ctor with __main linked in and in use. If
// this is the case, don't execute any of the global ctors, __main will do
// it.
if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
// Should be an array of '{ i32, void ()* }' structs. The first value is
// the init priority, which we ignore.
ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
if (!InitList)
return;
for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
if (!CS) continue;
Constant *FP = CS->getOperand(1);
if (FP->isNullValue())
continue; // Found a sentinel value, ignore.
// Strip off constant expression casts.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
if (CE->isCast())
FP = CE->getOperand(0);
// Execute the ctor/dtor function!
if (Function *F = dyn_cast<Function>(FP))
runFunction(F, None);
// FIXME: It is marginally lame that we just do nothing here if we see an
// entry we don't recognize. It might not be unreasonable for the verifier
// to not even allow this and just assert here.
}
}
void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
// Execute global ctors/dtors for each module in the program.
for (std::unique_ptr<Module> &M : Modules)
runStaticConstructorsDestructors(*M, isDtors);
}
#ifndef NDEBUG
/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
unsigned PtrSize = EE->getDataLayout()->getPointerSize();
for (unsigned i = 0; i < PtrSize; ++i)
if (*(i + (uint8_t*)Loc))
return false;
return true;
}
#endif
int ExecutionEngine::runFunctionAsMain(Function *Fn,
const std::vector<std::string> &argv,
const char * const * envp) {
std::vector<GenericValue> GVArgs;
GenericValue GVArgc;
GVArgc.IntVal = APInt(32, argv.size());
// Check main() type
unsigned NumArgs = Fn->getFunctionType()->getNumParams();
FunctionType *FTy = Fn->getFunctionType();
Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
// Check the argument types.
if (NumArgs > 3)
report_fatal_error("Invalid number of arguments of main() supplied");
if (NumArgs >= 3 && FTy->getParamType(2) != PPInt8Ty)
report_fatal_error("Invalid type for third argument of main() supplied");
if (NumArgs >= 2 && FTy->getParamType(1) != PPInt8Ty)
report_fatal_error("Invalid type for second argument of main() supplied");
if (NumArgs >= 1 && !FTy->getParamType(0)->isIntegerTy(32))
report_fatal_error("Invalid type for first argument of main() supplied");
if (!FTy->getReturnType()->isIntegerTy() &&
!FTy->getReturnType()->isVoidTy())
report_fatal_error("Invalid return type of main() supplied");
ArgvArray CArgv;
ArgvArray CEnv;
if (NumArgs) {
GVArgs.push_back(GVArgc); // Arg #0 = argc.
if (NumArgs > 1) {
// Arg #1 = argv.
GVArgs.push_back(PTOGV(CArgv.reset(Fn->getContext(), this, argv)));
assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) &&
"argv[0] was null after CreateArgv");
if (NumArgs > 2) {
std::vector<std::string> EnvVars;
for (unsigned i = 0; envp[i]; ++i)
EnvVars.emplace_back(envp[i]);
// Arg #2 = envp.
GVArgs.push_back(PTOGV(CEnv.reset(Fn->getContext(), this, EnvVars)));
}
}
}
return runFunction(Fn, GVArgs).IntVal.getZExtValue();
}
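// Example usage (a sketch; the names below are illustrative, not part of
// this file):
//
//   Function *MainFn = M.getFunction("main");
//   std::vector<std::string> Args = {"prog", "input.txt"};
//   const char *Envp[] = {"PATH=/usr/bin", nullptr};
//   int RC = EE->runFunctionAsMain(MainFn, Args, Envp);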
EngineBuilder::EngineBuilder() : EngineBuilder(nullptr) {}
EngineBuilder::EngineBuilder(std::unique_ptr<Module> M)
: M(std::move(M)), WhichEngine(EngineKind::Either), ErrorStr(nullptr),
OptLevel(CodeGenOpt::Default), MemMgr(nullptr), Resolver(nullptr),
RelocModel(Reloc::Default), CMModel(CodeModel::JITDefault),
UseOrcMCJITReplacement(false) {
// IR module verification is enabled by default in debug builds, and disabled
// by default in release builds.
#ifndef NDEBUG
VerifyModules = true;
#else
VerifyModules = false;
#endif
}
EngineBuilder::~EngineBuilder() = default;
EngineBuilder &EngineBuilder::setMCJITMemoryManager(
std::unique_ptr<RTDyldMemoryManager> mcjmm) {
auto SharedMM = std::shared_ptr<RTDyldMemoryManager>(std::move(mcjmm));
MemMgr = SharedMM;
Resolver = SharedMM;
return *this;
}
EngineBuilder&
EngineBuilder::setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM) {
MemMgr = std::shared_ptr<MCJITMemoryManager>(std::move(MM));
return *this;
}
EngineBuilder&
EngineBuilder::setSymbolResolver(std::unique_ptr<RuntimeDyld::SymbolResolver> SR) {
Resolver = std::shared_ptr<RuntimeDyld::SymbolResolver>(std::move(SR));
return *this;
}
ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
std::unique_ptr<TargetMachine> TheTM(TM); // Take ownership.
// Make sure we can resolve symbols in the program as well. The zero arg
// to the function tells DynamicLibrary to load the program, not a library.
if (sys::DynamicLibrary::LoadLibraryPermanently(nullptr, ErrorStr))
return nullptr;
// If the user specified a memory manager but didn't specify which engine to
// create, we assume they only want the JIT, and we fail if they only want
// the interpreter.
if (MemMgr) {
if (WhichEngine & EngineKind::JIT)
WhichEngine = EngineKind::JIT;
else {
if (ErrorStr)
*ErrorStr = "Cannot create an interpreter with a memory manager.";
return nullptr;
}
}
// Unless the interpreter was explicitly selected or the JIT is not linked,
// try making a JIT.
if ((WhichEngine & EngineKind::JIT) && TheTM) {
Triple TT(M->getTargetTriple());
if (!TM->getTarget().hasJIT()) {
errs() << "WARNING: This target JIT is not designed for the host"
<< " you are running. If bad things happen, please choose"
<< " a different -march switch.\n";
}
ExecutionEngine *EE = nullptr;
if (ExecutionEngine::OrcMCJITReplacementCtor && UseOrcMCJITReplacement) {
EE = ExecutionEngine::OrcMCJITReplacementCtor(ErrorStr, std::move(MemMgr),
std::move(Resolver),
std::move(TheTM));
EE->addModule(std::move(M));
} else if (ExecutionEngine::MCJITCtor)
EE = ExecutionEngine::MCJITCtor(std::move(M), ErrorStr, std::move(MemMgr),
std::move(Resolver), std::move(TheTM));
if (EE) {
EE->setVerifyModules(VerifyModules);
return EE;
}
}
// If we can't make a JIT and we didn't request one specifically, try making
// an interpreter instead.
if (WhichEngine & EngineKind::Interpreter) {
if (ExecutionEngine::InterpCtor)
return ExecutionEngine::InterpCtor(std::move(M), ErrorStr);
if (ErrorStr)
*ErrorStr = "Interpreter has not been linked in.";
return nullptr;
}
if ((WhichEngine & EngineKind::JIT) && !ExecutionEngine::MCJITCtor) {
if (ErrorStr)
*ErrorStr = "JIT has not been linked in.";
}
return nullptr;
}
void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV)))
return getPointerToFunction(F);
MutexGuard locked(lock);
if (void* P = getPointerToGlobalIfAvailable(GV))
return P;
// Global variable might have been added since interpreter started.
if (GlobalVariable *GVar =
const_cast<GlobalVariable *>(dyn_cast<GlobalVariable>(GV)))
EmitGlobalVariable(GVar);
else
llvm_unreachable("Global hasn't had an address allocated yet!");
return getPointerToGlobalIfAvailable(GV);
}
/// \brief Converts a Constant* into a GenericValue, including handling of
/// ConstantExpr values.
GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
  // If it's undefined, return garbage.
if (isa<UndefValue>(C)) {
GenericValue Result;
switch (C->getType()->getTypeID()) {
default:
break;
case Type::IntegerTyID:
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
// Although the value is undefined, we still have to construct an APInt
// with the correct bit width.
Result.IntVal = APInt(C->getType()->getPrimitiveSizeInBits(), 0);
break;
case Type::StructTyID: {
    // If the whole struct is 'undef', just reserve memory for the value.
if(StructType *STy = dyn_cast<StructType>(C->getType())) {
unsigned int elemNum = STy->getNumElements();
Result.AggregateVal.resize(elemNum);
for (unsigned int i = 0; i < elemNum; ++i) {
Type *ElemTy = STy->getElementType(i);
if (ElemTy->isIntegerTy())
Result.AggregateVal[i].IntVal =
APInt(ElemTy->getPrimitiveSizeInBits(), 0);
else if (ElemTy->isAggregateType()) {
const Constant *ElemUndef = UndefValue::get(ElemTy);
Result.AggregateVal[i] = getConstantValue(ElemUndef);
}
}
}
}
break;
case Type::VectorTyID:
    // If the whole vector is 'undef', just reserve memory for the value.
const VectorType* VTy = dyn_cast<VectorType>(C->getType());
const Type *ElemTy = VTy->getElementType();
unsigned int elemNum = VTy->getNumElements();
Result.AggregateVal.resize(elemNum);
if (ElemTy->isIntegerTy())
for (unsigned int i = 0; i < elemNum; ++i)
Result.AggregateVal[i].IntVal =
APInt(ElemTy->getPrimitiveSizeInBits(), 0);
break;
}
return Result;
}
// Otherwise, if the value is a ConstantExpr...
if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
Constant *Op0 = CE->getOperand(0);
switch (CE->getOpcode()) {
case Instruction::GetElementPtr: {
// Compute the index
GenericValue Result = getConstantValue(Op0);
APInt Offset(DL->getPointerSizeInBits(), 0);
cast<GEPOperator>(CE)->accumulateConstantOffset(*DL, Offset);
char* tmp = (char*) Result.PointerVal;
Result = PTOGV(tmp + Offset.getSExtValue());
return Result;
}
case Instruction::Trunc: {
GenericValue GV = getConstantValue(Op0);
uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
GV.IntVal = GV.IntVal.trunc(BitWidth);
return GV;
}
case Instruction::ZExt: {
GenericValue GV = getConstantValue(Op0);
uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
GV.IntVal = GV.IntVal.zext(BitWidth);
return GV;
}
case Instruction::SExt: {
GenericValue GV = getConstantValue(Op0);
uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
GV.IntVal = GV.IntVal.sext(BitWidth);
return GV;
}
case Instruction::FPTrunc: {
// FIXME long double
GenericValue GV = getConstantValue(Op0);
GV.FloatVal = float(GV.DoubleVal);
return GV;
}
case Instruction::FPExt:{
// FIXME long double
GenericValue GV = getConstantValue(Op0);
GV.DoubleVal = double(GV.FloatVal);
return GV;
}
case Instruction::UIToFP: {
GenericValue GV = getConstantValue(Op0);
if (CE->getType()->isFloatTy())
GV.FloatVal = float(GV.IntVal.roundToDouble());
else if (CE->getType()->isDoubleTy())
GV.DoubleVal = GV.IntVal.roundToDouble();
else if (CE->getType()->isX86_FP80Ty()) {
APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended);
(void)apf.convertFromAPInt(GV.IntVal,
false,
APFloat::rmNearestTiesToEven);
GV.IntVal = apf.bitcastToAPInt();
}
return GV;
}
case Instruction::SIToFP: {
GenericValue GV = getConstantValue(Op0);
if (CE->getType()->isFloatTy())
GV.FloatVal = float(GV.IntVal.signedRoundToDouble());
else if (CE->getType()->isDoubleTy())
GV.DoubleVal = GV.IntVal.signedRoundToDouble();
else if (CE->getType()->isX86_FP80Ty()) {
APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended);
(void)apf.convertFromAPInt(GV.IntVal,
true,
APFloat::rmNearestTiesToEven);
GV.IntVal = apf.bitcastToAPInt();
}
return GV;
}
case Instruction::FPToUI: // double->APInt conversion handles sign
case Instruction::FPToSI: {
GenericValue GV = getConstantValue(Op0);
uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
if (Op0->getType()->isFloatTy())
GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth);
else if (Op0->getType()->isDoubleTy())
GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth);
else if (Op0->getType()->isX86_FP80Ty()) {
APFloat apf = APFloat(APFloat::x87DoubleExtended, GV.IntVal);
uint64_t v;
bool ignored;
(void)apf.convertToInteger(&v, BitWidth,
CE->getOpcode()==Instruction::FPToSI,
APFloat::rmTowardZero, &ignored);
GV.IntVal = v; // endian?
}
return GV;
}
case Instruction::PtrToInt: {
GenericValue GV = getConstantValue(Op0);
uint32_t PtrWidth = DL->getTypeSizeInBits(Op0->getType());
assert(PtrWidth <= 64 && "Bad pointer width");
GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
uint32_t IntWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
return GV;
}
case Instruction::IntToPtr: {
GenericValue GV = getConstantValue(Op0);
uint32_t PtrWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
return GV;
}
case Instruction::BitCast: {
GenericValue GV = getConstantValue(Op0);
Type* DestTy = CE->getType();
switch (Op0->getType()->getTypeID()) {
default: llvm_unreachable("Invalid bitcast operand");
case Type::IntegerTyID:
assert(DestTy->isFloatingPointTy() && "invalid bitcast");
if (DestTy->isFloatTy())
GV.FloatVal = GV.IntVal.bitsToFloat();
else if (DestTy->isDoubleTy())
GV.DoubleVal = GV.IntVal.bitsToDouble();
break;
case Type::FloatTyID:
assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
GV.IntVal = APInt::floatToBits(GV.FloatVal);
break;
case Type::DoubleTyID:
assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
GV.IntVal = APInt::doubleToBits(GV.DoubleVal);
break;
case Type::PointerTyID:
assert(DestTy->isPointerTy() && "Invalid bitcast");
break; // getConstantValue(Op0) above already converted it
}
return GV;
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
GenericValue LHS = getConstantValue(Op0);
GenericValue RHS = getConstantValue(CE->getOperand(1));
GenericValue GV;
switch (CE->getOperand(0)->getType()->getTypeID()) {
default: llvm_unreachable("Bad add type!");
case Type::IntegerTyID:
switch (CE->getOpcode()) {
default: llvm_unreachable("Invalid integer opcode");
case Instruction::Add: GV.IntVal = LHS.IntVal + RHS.IntVal; break;
case Instruction::Sub: GV.IntVal = LHS.IntVal - RHS.IntVal; break;
case Instruction::Mul: GV.IntVal = LHS.IntVal * RHS.IntVal; break;
case Instruction::UDiv:GV.IntVal = LHS.IntVal.udiv(RHS.IntVal); break;
case Instruction::SDiv:GV.IntVal = LHS.IntVal.sdiv(RHS.IntVal); break;
case Instruction::URem:GV.IntVal = LHS.IntVal.urem(RHS.IntVal); break;
case Instruction::SRem:GV.IntVal = LHS.IntVal.srem(RHS.IntVal); break;
case Instruction::And: GV.IntVal = LHS.IntVal & RHS.IntVal; break;
case Instruction::Or: GV.IntVal = LHS.IntVal | RHS.IntVal; break;
case Instruction::Xor: GV.IntVal = LHS.IntVal ^ RHS.IntVal; break;
}
break;
case Type::FloatTyID:
switch (CE->getOpcode()) {
default: llvm_unreachable("Invalid float opcode");
case Instruction::FAdd:
GV.FloatVal = LHS.FloatVal + RHS.FloatVal; break;
case Instruction::FSub:
GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break;
case Instruction::FMul:
GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break;
case Instruction::FDiv:
GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
case Instruction::FRem:
GV.FloatVal = std::fmod(LHS.FloatVal,RHS.FloatVal); break;
}
break;
case Type::DoubleTyID:
switch (CE->getOpcode()) {
default: llvm_unreachable("Invalid double opcode");
case Instruction::FAdd:
GV.DoubleVal = LHS.DoubleVal + RHS.DoubleVal; break;
case Instruction::FSub:
GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break;
case Instruction::FMul:
GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break;
case Instruction::FDiv:
GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
case Instruction::FRem:
GV.DoubleVal = std::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
}
break;
case Type::X86_FP80TyID:
case Type::PPC_FP128TyID:
case Type::FP128TyID: {
const fltSemantics &Sem = CE->getOperand(0)->getType()->getFltSemantics();
APFloat apfLHS = APFloat(Sem, LHS.IntVal);
switch (CE->getOpcode()) {
default: llvm_unreachable("Invalid long double opcode");
case Instruction::FAdd:
apfLHS.add(APFloat(Sem, RHS.IntVal), APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
case Instruction::FSub:
apfLHS.subtract(APFloat(Sem, RHS.IntVal),
APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
case Instruction::FMul:
apfLHS.multiply(APFloat(Sem, RHS.IntVal),
APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
case Instruction::FDiv:
apfLHS.divide(APFloat(Sem, RHS.IntVal),
APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
case Instruction::FRem:
apfLHS.mod(APFloat(Sem, RHS.IntVal),
APFloat::rmNearestTiesToEven);
GV.IntVal = apfLHS.bitcastToAPInt();
break;
}
}
break;
}
return GV;
}
default:
break;
}
SmallString<256> Msg;
raw_svector_ostream OS(Msg);
OS << "ConstantExpr not handled: " << *CE;
report_fatal_error(OS.str());
}
// Otherwise, we have a simple constant.
GenericValue Result;
switch (C->getType()->getTypeID()) {
case Type::FloatTyID:
Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
break;
case Type::DoubleTyID:
Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble();
break;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
Result.IntVal = cast <ConstantFP>(C)->getValueAPF().bitcastToAPInt();
break;
case Type::IntegerTyID:
Result.IntVal = cast<ConstantInt>(C)->getValue();
break;
case Type::PointerTyID:
if (isa<ConstantPointerNull>(C))
Result.PointerVal = nullptr;
else if (const Function *F = dyn_cast<Function>(C))
Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV)));
else
llvm_unreachable("Unknown constant pointer type!");
break;
case Type::VectorTyID: {
unsigned elemNum;
Type* ElemTy;
const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
const ConstantVector *CV = dyn_cast<ConstantVector>(C);
const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(C);
if (CDV) {
elemNum = CDV->getNumElements();
ElemTy = CDV->getElementType();
} else if (CV || CAZ) {
VectorType* VTy = dyn_cast<VectorType>(C->getType());
elemNum = VTy->getNumElements();
ElemTy = VTy->getElementType();
} else {
llvm_unreachable("Unknown constant vector type!");
}
Result.AggregateVal.resize(elemNum);
// Check if vector holds floats.
if(ElemTy->isFloatTy()) {
if (CAZ) {
GenericValue floatZero;
floatZero.FloatVal = 0.f;
std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
floatZero);
break;
}
if(CV) {
for (unsigned i = 0; i < elemNum; ++i)
if (!isa<UndefValue>(CV->getOperand(i)))
Result.AggregateVal[i].FloatVal = cast<ConstantFP>(
CV->getOperand(i))->getValueAPF().convertToFloat();
break;
}
if(CDV)
for (unsigned i = 0; i < elemNum; ++i)
Result.AggregateVal[i].FloatVal = CDV->getElementAsFloat(i);
break;
}
// Check if vector holds doubles.
if (ElemTy->isDoubleTy()) {
if (CAZ) {
GenericValue doubleZero;
doubleZero.DoubleVal = 0.0;
std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
doubleZero);
break;
}
if(CV) {
for (unsigned i = 0; i < elemNum; ++i)
if (!isa<UndefValue>(CV->getOperand(i)))
Result.AggregateVal[i].DoubleVal = cast<ConstantFP>(
CV->getOperand(i))->getValueAPF().convertToDouble();
break;
}
if(CDV)
for (unsigned i = 0; i < elemNum; ++i)
Result.AggregateVal[i].DoubleVal = CDV->getElementAsDouble(i);
break;
}
// Check if vector holds integers.
if (ElemTy->isIntegerTy()) {
if (CAZ) {
GenericValue intZero;
intZero.IntVal = APInt(ElemTy->getScalarSizeInBits(), 0ull);
std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
intZero);
break;
}
if(CV) {
for (unsigned i = 0; i < elemNum; ++i)
if (!isa<UndefValue>(CV->getOperand(i)))
Result.AggregateVal[i].IntVal = cast<ConstantInt>(
CV->getOperand(i))->getValue();
else {
Result.AggregateVal[i].IntVal =
APInt(CV->getOperand(i)->getType()->getPrimitiveSizeInBits(), 0);
}
break;
}
if(CDV)
for (unsigned i = 0; i < elemNum; ++i)
Result.AggregateVal[i].IntVal = APInt(
CDV->getElementType()->getPrimitiveSizeInBits(),
CDV->getElementAsInteger(i));
break;
}
    llvm_unreachable("Unknown constant vector element type!");
}
break;
default:
SmallString<256> Msg;
raw_svector_ostream OS(Msg);
OS << "ERROR: Constant unimplemented for type: " << *C->getType();
report_fatal_error(OS.str());
}
return Result;
}
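// Illustrative example of the ConstantExpr handling above: for
// 'ptrtoint (i32* @G to i64)', getConstantValue first resolves @G to its
// host address via the PointerTyID case, and the PtrToInt case then
// zero-extends or truncates that address into a 64-bit APInt.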
/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
/// with the integer held in IntVal.
static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
unsigned StoreBytes) {
assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
if (sys::IsLittleEndianHost) {
// Little-endian host - the source is ordered from LSB to MSB. Order the
// destination from LSB to MSB: Do a straight copy.
memcpy(Dst, Src, StoreBytes);
} else {
// Big-endian host - the source is an array of 64 bit words ordered from
// LSW to MSW. Each word is ordered from MSB to LSB. Order the destination
// from MSB to LSB: Reverse the word order, but not the bytes in a word.
while (StoreBytes > sizeof(uint64_t)) {
StoreBytes -= sizeof(uint64_t);
// May not be aligned so use memcpy.
memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
Src += sizeof(uint64_t);
}
memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
}
}
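// Worked example (illustrative): storing 10 bytes of an i80 whose raw words
// are [W0 = LSW, W1 = MSW] on a big-endian host first copies W0 to
// Dst[2..9], then the two low-order bytes of W1 to Dst[0..1], so the
// destination reads MSB-to-LSB from end to end.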
void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
GenericValue *Ptr, Type *Ty) {
const unsigned StoreBytes = getDataLayout()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) {
default:
dbgs() << "Cannot store value of type " << *Ty << "!\n";
break;
case Type::IntegerTyID:
StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes);
break;
case Type::FloatTyID:
*((float*)Ptr) = Val.FloatVal;
break;
case Type::DoubleTyID:
*((double*)Ptr) = Val.DoubleVal;
break;
case Type::X86_FP80TyID:
memcpy(Ptr, Val.IntVal.getRawData(), 10);
break;
case Type::PointerTyID:
// Ensure 64 bit target pointers are fully initialized on 32 bit hosts.
if (StoreBytes != sizeof(PointerTy))
memset(&(Ptr->PointerVal), 0, StoreBytes);
*((PointerTy*)Ptr) = Val.PointerVal;
break;
case Type::VectorTyID:
for (unsigned i = 0; i < Val.AggregateVal.size(); ++i) {
if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
*(((double*)Ptr)+i) = Val.AggregateVal[i].DoubleVal;
if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
*(((float*)Ptr)+i) = Val.AggregateVal[i].FloatVal;
if (cast<VectorType>(Ty)->getElementType()->isIntegerTy()) {
unsigned numOfBytes =(Val.AggregateVal[i].IntVal.getBitWidth()+7)/8;
StoreIntToMemory(Val.AggregateVal[i].IntVal,
(uint8_t*)Ptr + numOfBytes*i, numOfBytes);
}
}
break;
}
if (sys::IsLittleEndianHost != getDataLayout()->isLittleEndian())
// Host and target are different endian - reverse the stored bytes.
std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
}
/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
uint8_t *Dst = reinterpret_cast<uint8_t *>(
const_cast<uint64_t *>(IntVal.getRawData()));
if (sys::IsLittleEndianHost)
// Little-endian host - the destination must be ordered from LSB to MSB.
// The source is ordered from LSB to MSB: Do a straight copy.
memcpy(Dst, Src, LoadBytes);
else {
// Big-endian - the destination is an array of 64 bit words ordered from
// LSW to MSW. Each word must be ordered from MSB to LSB. The source is
// ordered from MSB to LSB: Reverse the word order, but not the bytes in
// a word.
while (LoadBytes > sizeof(uint64_t)) {
LoadBytes -= sizeof(uint64_t);
// May not be aligned so use memcpy.
memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
Dst += sizeof(uint64_t);
}
memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
}
}
/// LoadValueFromMemory - Load a value of type Ty from target memory at Ptr
/// into Result. Handles integer, floating-point, pointer, x86_fp80 and
/// vector types; anything else is a fatal error.
void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
GenericValue *Ptr,
Type *Ty) {
const unsigned LoadBytes = getDataLayout()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
// An APInt with all words initially zero.
Result.IntVal = APInt(cast<IntegerType>(Ty)->getBitWidth(), 0);
LoadIntFromMemory(Result.IntVal, (uint8_t*)Ptr, LoadBytes);
break;
case Type::FloatTyID:
Result.FloatVal = *((float*)Ptr);
break;
case Type::DoubleTyID:
Result.DoubleVal = *((double*)Ptr);
break;
case Type::PointerTyID:
Result.PointerVal = *((PointerTy*)Ptr);
break;
case Type::X86_FP80TyID: {
// This is endian dependent, but it will only work on x86 anyway.
// FIXME: Will not trap if loading a signaling NaN.
uint64_t y[2];
memcpy(y, Ptr, 10);
Result.IntVal = APInt(80, y);
break;
}
case Type::VectorTyID: {
const VectorType *VT = cast<VectorType>(Ty);
const Type *ElemT = VT->getElementType();
const unsigned numElems = VT->getNumElements();
if (ElemT->isFloatTy()) {
Result.AggregateVal.resize(numElems);
for (unsigned i = 0; i < numElems; ++i)
Result.AggregateVal[i].FloatVal = *((float*)Ptr+i);
}
if (ElemT->isDoubleTy()) {
Result.AggregateVal.resize(numElems);
for (unsigned i = 0; i < numElems; ++i)
Result.AggregateVal[i].DoubleVal = *((double*)Ptr+i);
}
if (ElemT->isIntegerTy()) {
GenericValue intZero;
const unsigned elemBitWidth = cast<IntegerType>(ElemT)->getBitWidth();
intZero.IntVal = APInt(elemBitWidth, 0);
Result.AggregateVal.resize(numElems, intZero);
for (unsigned i = 0; i < numElems; ++i)
LoadIntFromMemory(Result.AggregateVal[i].IntVal,
(uint8_t*)Ptr+((elemBitWidth+7)/8)*i, (elemBitWidth+7)/8);
}
break;
}
default:
SmallString<256> Msg;
raw_svector_ostream OS(Msg);
OS << "Cannot load value of type " << *Ty << "!";
report_fatal_error(OS.str());
}
}
void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
DEBUG(Init->dump());
if (isa<UndefValue>(Init))
return;
if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
unsigned ElementSize =
getDataLayout()->getTypeAllocSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
return;
}
if (isa<ConstantAggregateZero>(Init)) {
memset(Addr, 0, (size_t)getDataLayout()->getTypeAllocSize(Init->getType()));
return;
}
if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
unsigned ElementSize =
getDataLayout()->getTypeAllocSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
return;
}
if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
const StructLayout *SL =
getDataLayout()->getStructLayout(cast<StructType>(CPS->getType()));
for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
return;
}
if (const ConstantDataSequential *CDS =
dyn_cast<ConstantDataSequential>(Init)) {
// CDS is already laid out in host memory order.
StringRef Data = CDS->getRawDataValues();
memcpy(Addr, Data.data(), Data.size());
return;
}
if (Init->getType()->isFirstClassType()) {
GenericValue Val = getConstantValue(Init);
StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
return;
}
DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
llvm_unreachable("Unknown constant type to initialize memory with!");
}
/// EmitGlobals - Emit all of the global variables to memory, storing their
/// addresses into GlobalAddress. This must make sure to copy the contents of
/// their initializers into the memory.
void ExecutionEngine::emitGlobals() {
// Loop over all of the global variables in the program, allocating the memory
// to hold them. If there is more than one module, do a prepass over globals
// to figure out how the different modules should link together.
std::map<std::pair<std::string, Type*>,
const GlobalValue*> LinkedGlobalsMap;
if (Modules.size() != 1) {
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
for (const auto &GV : M.globals()) {
if (GV.hasLocalLinkage() || GV.isDeclaration() ||
GV.hasAppendingLinkage() || !GV.hasName())
continue;// Ignore external globals and globals with internal linkage.
const GlobalValue *&GVEntry =
LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())];
// If this is the first time we've seen this global, it is the canonical
// version.
if (!GVEntry) {
GVEntry = &GV;
continue;
}
// If the existing global is strong, never replace it.
if (GVEntry->hasExternalLinkage())
continue;
// Otherwise, we know it's linkonce/weak, replace it if this is a strong
// symbol. FIXME is this right for common?
if (GV.hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
GVEntry = &GV;
}
}
}
std::vector<const GlobalValue*> NonCanonicalGlobals;
for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
Module &M = *Modules[m];
for (const auto &GV : M.globals()) {
// In the multi-module case, see what this global maps to.
if (!LinkedGlobalsMap.empty()) {
if (const GlobalValue *GVEntry =
LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())]) {
// If something else is the canonical global, ignore this one.
if (GVEntry != &GV) {
NonCanonicalGlobals.push_back(&GV);
continue;
}
}
}
if (!GV.isDeclaration()) {
addGlobalMapping(&GV, getMemoryForGV(&GV));
} else {
// External variable reference. Try to use the dynamic loader to
// get a pointer to it.
if (void *SymAddr =
sys::DynamicLibrary::SearchForAddressOfSymbol(GV.getName()))
addGlobalMapping(&GV, SymAddr);
else {
report_fatal_error("Could not resolve external global address: "
+GV.getName());
}
}
}
// If there are multiple modules, map the non-canonical globals to their
// canonical location.
if (!NonCanonicalGlobals.empty()) {
for (unsigned i = 0, e = NonCanonicalGlobals.size(); i != e; ++i) {
const GlobalValue *GV = NonCanonicalGlobals[i];
const GlobalValue *CGV =
LinkedGlobalsMap[std::make_pair(GV->getName(), GV->getType())];
void *Ptr = getPointerToGlobalIfAvailable(CGV);
assert(Ptr && "Canonical global wasn't codegen'd!");
addGlobalMapping(GV, Ptr);
}
}
// Now that all of the globals are set up in memory, loop through them all
// and initialize their contents.
for (const auto &GV : M.globals()) {
if (!GV.isDeclaration()) {
if (!LinkedGlobalsMap.empty()) {
if (const GlobalValue *GVEntry =
LinkedGlobalsMap[std::make_pair(GV.getName(), GV.getType())])
if (GVEntry != &GV) // Not the canonical variable.
continue;
}
EmitGlobalVariable(&GV);
}
}
}
}
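// Example of the canonicalization above (a sketch): if module A defines
// 'linkonce @x' and module B defines a strong '@x', B's copy becomes the
// canonical one; A's @x lands in NonCanonicalGlobals and is mapped to B's
// address rather than receiving its own storage.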
// EmitGlobalVariable - This method emits the specified global variable to the
// address specified in GlobalAddresses, or allocates new memory if it's not
// already in the map.
void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
void *GA = getPointerToGlobalIfAvailable(GV);
if (!GA) {
// If it's not already specified, allocate memory for the global.
GA = getMemoryForGV(GV);
// If we failed to allocate memory for this global, return.
if (!GA) return;
addGlobalMapping(GV, GA);
}
// Don't initialize if it's thread local, let the client do it.
if (!GV->isThreadLocal())
InitializeMemory(GV->getInitializer(), GA);
Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)getDataLayout()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize;
++NumGlobals;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/ExecutionEngine/GDBRegistrationListener.cpp | //===----- GDBRegistrationListener.cpp - Registers objects with GDB -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
using namespace llvm;
using namespace llvm::object;
// This must be kept in sync with gdb/gdb/jit.h .
extern "C" {
typedef enum {
JIT_NOACTION = 0,
JIT_REGISTER_FN,
JIT_UNREGISTER_FN
} jit_actions_t;
struct jit_code_entry {
struct jit_code_entry *next_entry;
struct jit_code_entry *prev_entry;
const char *symfile_addr;
uint64_t symfile_size;
};
struct jit_descriptor {
uint32_t version;
// This should be jit_actions_t, but we want to be specific about the
// bit-width.
uint32_t action_flag;
struct jit_code_entry *relevant_entry;
struct jit_code_entry *first_entry;
};
// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
// debugger checks the version before we can set it during runtime.
struct jit_descriptor __jit_debug_descriptor = { 1, 0, nullptr, nullptr };
// Debuggers put a breakpoint in this function.
LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
// The noinline and the asm prevent calls to this function from being
// optimized out.
#if !defined(_MSC_VER)
asm volatile("":::"memory");
#endif
}
}
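// How this is consumed (a sketch of the protocol; see gdb/gdb/jit.h for the
// authoritative description): the debugger sets a breakpoint on
// __jit_debug_register_code and, each time it fires, inspects
// __jit_debug_descriptor.action_flag and .relevant_entry to learn which
// in-memory object file was just registered or unregistered.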
namespace {
struct RegisteredObjectInfo {
RegisteredObjectInfo() {}
RegisteredObjectInfo(std::size_t Size, jit_code_entry *Entry,
OwningBinary<ObjectFile> Obj)
: Size(Size), Entry(Entry), Obj(std::move(Obj)) {}
RegisteredObjectInfo(RegisteredObjectInfo &&Other)
: Size(Other.Size), Entry(Other.Entry), Obj(std::move(Other.Obj)) {}
RegisteredObjectInfo& operator=(RegisteredObjectInfo &&Other) {
Size = Other.Size;
Entry = Other.Entry;
Obj = std::move(Other.Obj);
return *this;
}
std::size_t Size;
jit_code_entry *Entry;
OwningBinary<ObjectFile> Obj;
};
// Buffer for an in-memory object file in executable memory
typedef llvm::DenseMap< const char*, RegisteredObjectInfo>
RegisteredObjectBufferMap;
/// Global access point for the JIT debugging interface designed for use with a
/// singleton toolbox. Handles thread-safe registration and deregistration of
/// object files that are in executable memory managed by the client of this
/// class.
class GDBJITRegistrationListener : public JITEventListener {
/// A map of in-memory object files that have been registered with the
/// JIT interface.
RegisteredObjectBufferMap ObjectBufferMap;
public:
/// Instantiates the JIT service.
GDBJITRegistrationListener() : ObjectBufferMap() {}
/// Unregisters each object that was previously registered and releases all
/// internal resources.
~GDBJITRegistrationListener() override;
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
void NotifyObjectEmitted(const ObjectFile &Object,
const RuntimeDyld::LoadedObjectInfo &L) override;
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was found in ObjectBufferMap.
void NotifyFreeingObject(const ObjectFile &Object) override;
private:
/// Deregister the debug info for the given object file from the debugger
/// and delete any temporary copies. This private method does not remove
/// the function from Map so that it can be called while iterating over Map.
void deregisterObjectInternal(RegisteredObjectBufferMap::iterator I);
};
/// Lock used to serialize all jit registration events, since they
/// modify global variables.
ManagedStatic<sys::Mutex> JITDebugLock;
/// Do the registration.
void NotifyDebugger(jit_code_entry* JITCodeEntry) {
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
// Insert this entry at the head of the list.
JITCodeEntry->prev_entry = nullptr;
jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
JITCodeEntry->next_entry = NextEntry;
if (NextEntry) {
NextEntry->prev_entry = JITCodeEntry;
}
__jit_debug_descriptor.first_entry = JITCodeEntry;
__jit_debug_descriptor.relevant_entry = JITCodeEntry;
__jit_debug_register_code();
}
GDBJITRegistrationListener::~GDBJITRegistrationListener() {
// Free all registered object files.
llvm::MutexGuard locked(*JITDebugLock);
for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(),
E = ObjectBufferMap.end();
I != E; ++I) {
// Call the private method that doesn't update the map so our iterator
// doesn't break.
deregisterObjectInternal(I);
}
ObjectBufferMap.clear();
}
void GDBJITRegistrationListener::NotifyObjectEmitted(
const ObjectFile &Object,
const RuntimeDyld::LoadedObjectInfo &L) {
OwningBinary<ObjectFile> DebugObj = L.getObjectForDebug(Object);
// Bail out if debug objects aren't supported.
if (!DebugObj.getBinary())
return;
const char *Buffer = DebugObj.getBinary()->getMemoryBufferRef().getBufferStart();
size_t Size = DebugObj.getBinary()->getMemoryBufferRef().getBufferSize();
const char *Key = Object.getMemoryBufferRef().getBufferStart();
assert(Key && "Attempt to register a null object with a debugger.");
llvm::MutexGuard locked(*JITDebugLock);
assert(ObjectBufferMap.find(Key) == ObjectBufferMap.end() &&
"Second attempt to perform debug registration.");
jit_code_entry* JITCodeEntry = new jit_code_entry();
if (!JITCodeEntry) {
llvm::report_fatal_error(
"Allocation failed when registering a JIT entry!\n");
} else {
JITCodeEntry->symfile_addr = Buffer;
JITCodeEntry->symfile_size = Size;
ObjectBufferMap[Key] = RegisteredObjectInfo(Size, JITCodeEntry,
std::move(DebugObj));
NotifyDebugger(JITCodeEntry);
}
}
void GDBJITRegistrationListener::NotifyFreeingObject(const ObjectFile& Object) {
const char *Key = Object.getMemoryBufferRef().getBufferStart();
llvm::MutexGuard locked(*JITDebugLock);
RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Key);
if (I != ObjectBufferMap.end()) {
deregisterObjectInternal(I);
ObjectBufferMap.erase(I);
}
}
void GDBJITRegistrationListener::deregisterObjectInternal(
RegisteredObjectBufferMap::iterator I) {
jit_code_entry*& JITCodeEntry = I->second.Entry;
// Do the unregistration.
{
__jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
// Remove the jit_code_entry from the linked list.
jit_code_entry* PrevEntry = JITCodeEntry->prev_entry;
jit_code_entry* NextEntry = JITCodeEntry->next_entry;
if (NextEntry) {
NextEntry->prev_entry = PrevEntry;
}
if (PrevEntry) {
PrevEntry->next_entry = NextEntry;
}
else {
assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
__jit_debug_descriptor.first_entry = NextEntry;
}
// Tell the debugger which entry we removed, and unregister the code.
__jit_debug_descriptor.relevant_entry = JITCodeEntry;
__jit_debug_register_code();
}
delete JITCodeEntry;
JITCodeEntry = nullptr;
}
llvm::ManagedStatic<GDBJITRegistrationListener> GDBRegListener;
} // end namespace
namespace llvm {
JITEventListener* JITEventListener::createGDBRegistrationListener() {
return &*GDBRegListener;
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/OProfileJIT/OProfileWrapper.cpp | //===-- OProfileWrapper.cpp - OProfile JIT API Wrapper implementation -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface in OProfileWrapper.h. It is responsible
// for loading the opagent dynamic library when the first call to an op_
// function occurs.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/OProfileWrapper.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <dirent.h>
#include <fcntl.h>
#include <sstream>
#include <stddef.h>
#include <sys/stat.h>
#include <unistd.h>
#define DEBUG_TYPE "oprofile-wrapper"
namespace {
// Global mutex to ensure a single thread initializes oprofile agent.
llvm::sys::Mutex OProfileInitializationMutex;
} // anonymous namespace
namespace llvm {
OProfileWrapper::OProfileWrapper()
: Agent(0),
OpenAgentFunc(0),
CloseAgentFunc(0),
WriteNativeCodeFunc(0),
WriteDebugLineInfoFunc(0),
UnloadNativeCodeFunc(0),
MajorVersionFunc(0),
MinorVersionFunc(0),
IsOProfileRunningFunc(0),
Initialized(false) {
}
bool OProfileWrapper::initialize() {
using namespace llvm;
using namespace llvm::sys;
MutexGuard Guard(OProfileInitializationMutex);
if (Initialized)
return OpenAgentFunc != 0;
Initialized = true;
// If the oprofile daemon is not running, don't load the opagent library
if (!isOProfileRunning()) {
DEBUG(dbgs() << "OProfile daemon is not detected.\n");
return false;
}
std::string error;
  // Note: LoadLibraryPermanently returns true on failure.
  if (DynamicLibrary::LoadLibraryPermanently("libopagent.so", &error)) {
DEBUG(dbgs()
<< "OProfile connector library libopagent.so could not be loaded: "
<< error << "\n");
}
// Get the addresses of the opagent functions
OpenAgentFunc = (op_open_agent_ptr_t)(intptr_t)
DynamicLibrary::SearchForAddressOfSymbol("op_open_agent");
CloseAgentFunc = (op_close_agent_ptr_t)(intptr_t)
DynamicLibrary::SearchForAddressOfSymbol("op_close_agent");
WriteNativeCodeFunc = (op_write_native_code_ptr_t)(intptr_t)
DynamicLibrary::SearchForAddressOfSymbol("op_write_native_code");
WriteDebugLineInfoFunc = (op_write_debug_line_info_ptr_t)(intptr_t)
DynamicLibrary::SearchForAddressOfSymbol("op_write_debug_line_info");
UnloadNativeCodeFunc = (op_unload_native_code_ptr_t)(intptr_t)
DynamicLibrary::SearchForAddressOfSymbol("op_unload_native_code");
MajorVersionFunc = (op_major_version_ptr_t)(intptr_t)
DynamicLibrary::SearchForAddressOfSymbol("op_major_version");
  MinorVersionFunc = (op_minor_version_ptr_t)(intptr_t)
          DynamicLibrary::SearchForAddressOfSymbol("op_minor_version");
// With missing functions, we can do nothing
if (!OpenAgentFunc
|| !CloseAgentFunc
|| !WriteNativeCodeFunc
|| !WriteDebugLineInfoFunc
|| !UnloadNativeCodeFunc) {
OpenAgentFunc = 0;
CloseAgentFunc = 0;
WriteNativeCodeFunc = 0;
WriteDebugLineInfoFunc = 0;
UnloadNativeCodeFunc = 0;
return false;
}
return true;
}
bool OProfileWrapper::isOProfileRunning() {
if (IsOProfileRunningFunc != 0)
return IsOProfileRunningFunc();
return checkForOProfileProcEntry();
}
bool OProfileWrapper::checkForOProfileProcEntry() {
DIR* ProcDir;
ProcDir = opendir("/proc");
if (!ProcDir)
return false;
// Walk the /proc tree looking for the oprofile daemon
struct dirent* Entry;
while (0 != (Entry = readdir(ProcDir))) {
if (Entry->d_type == DT_DIR) {
// Build a path from the current entry name
SmallString<256> CmdLineFName;
raw_svector_ostream(CmdLineFName) << "/proc/" << Entry->d_name
<< "/cmdline";
      // Open the cmdline file read-only. (The second argument to open() is
      // an access-mode flag, so O_RDONLY is what is meant here, not the
      // permission bit S_IRUSR.)
      int CmdLineFD = open(CmdLineFName.c_str(), O_RDONLY);
if (CmdLineFD != -1) {
char ExeName[PATH_MAX+1];
char* BaseName = 0;
// Read the cmdline file
ssize_t NumRead = read(CmdLineFD, ExeName, PATH_MAX+1);
close(CmdLineFD);
ssize_t Idx = 0;
if (ExeName[0] != '/') {
BaseName = ExeName;
}
// Find the terminator for the first string
while (Idx < NumRead-1 && ExeName[Idx] != 0) {
Idx++;
}
// Go back to the last non-null character
Idx--;
// Find the last path separator in the first string
while (Idx > 0) {
if (ExeName[Idx] == '/') {
BaseName = ExeName + Idx + 1;
break;
}
Idx--;
}
// Test this to see if it is the oprofile daemon
if (BaseName != 0 && (!strcmp("oprofiled", BaseName) ||
!strcmp("operf", BaseName))) {
// If it is, we're done
closedir(ProcDir);
return true;
}
}
}
}
// We've looked through all the files and didn't find the daemon
closedir(ProcDir);
return false;
}
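// Note: the scan above is a heuristic. It treats any /proc/<pid>/cmdline
// whose argv[0] basename is "oprofiled" or "operf" as evidence that the
// OProfile daemon is running; there is no API guarantee behind it.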
bool OProfileWrapper::op_open_agent() {
if (!Initialized)
initialize();
if (OpenAgentFunc != 0) {
Agent = OpenAgentFunc();
return Agent != 0;
}
return false;
}
int OProfileWrapper::op_close_agent() {
if (!Initialized)
initialize();
int ret = -1;
if (Agent && CloseAgentFunc) {
ret = CloseAgentFunc(Agent);
if (ret == 0) {
Agent = 0;
}
}
return ret;
}
bool OProfileWrapper::isAgentAvailable() {
return Agent != 0;
}
int OProfileWrapper::op_write_native_code(const char* Name,
uint64_t Addr,
void const* Code,
const unsigned int Size) {
if (!Initialized)
initialize();
if (Agent && WriteNativeCodeFunc)
return WriteNativeCodeFunc(Agent, Name, Addr, Code, Size);
return -1;
}
int OProfileWrapper::op_write_debug_line_info(
void const* Code,
size_t NumEntries,
struct debug_line_info const* Info) {
if (!Initialized)
initialize();
if (Agent && WriteDebugLineInfoFunc)
return WriteDebugLineInfoFunc(Agent, Code, NumEntries, Info);
return -1;
}
int OProfileWrapper::op_major_version() {
if (!Initialized)
initialize();
if (Agent && MajorVersionFunc)
return MajorVersionFunc();
return -1;
}
int OProfileWrapper::op_minor_version() {
if (!Initialized)
initialize();
if (Agent && MinorVersionFunc)
return MinorVersionFunc();
return -1;
}
int OProfileWrapper::op_unload_native_code(uint64_t Addr) {
if (!Initialized)
initialize();
if (Agent && UnloadNativeCodeFunc)
return UnloadNativeCodeFunc(Agent, Addr);
return -1;
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/OProfileJIT/CMakeLists.txt |
include_directories( ${LLVM_OPROFILE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/.. )
add_llvm_library(LLVMOProfileJIT
OProfileJITEventListener.cpp
OProfileWrapper.cpp
)
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/OProfileJIT/LLVMBuild.txt | ;===- ./lib/ExecutionEngine/OProfileJIT/LLVMBuild.txt ----------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[common]
[component_0]
type = OptionalLibrary
name = OProfileJIT
parent = ExecutionEngine
required_libraries = Support Object ExecutionEngine
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/OProfileJIT/OProfileJITEventListener.cpp | //===-- OProfileJITEventListener.cpp - Tell OProfile about JITted code ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a JITEventListener object that uses OProfileWrapper to tell
// oprofile about JITted functions, including source line information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/OProfileWrapper.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolSize.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/raw_ostream.h"
#include <dirent.h>
#include <fcntl.h>
using namespace llvm;
using namespace llvm::object;
#define DEBUG_TYPE "oprofile-jit-event-listener"
namespace {
class OProfileJITEventListener : public JITEventListener {
std::unique_ptr<OProfileWrapper> Wrapper;
void initialize();
std::map<const char*, OwningBinary<ObjectFile>> DebugObjects;
public:
OProfileJITEventListener(std::unique_ptr<OProfileWrapper> LibraryWrapper)
: Wrapper(std::move(LibraryWrapper)) {
initialize();
}
~OProfileJITEventListener();
void NotifyObjectEmitted(const ObjectFile &Obj,
const RuntimeDyld::LoadedObjectInfo &L) override;
void NotifyFreeingObject(const ObjectFile &Obj) override;
};
void OProfileJITEventListener::initialize() {
if (!Wrapper->op_open_agent()) {
const std::string err_str = sys::StrError();
DEBUG(dbgs() << "Failed to connect to OProfile agent: " << err_str << "\n");
} else {
DEBUG(dbgs() << "Connected to OProfile agent.\n");
}
}
OProfileJITEventListener::~OProfileJITEventListener() {
if (Wrapper->isAgentAvailable()) {
if (Wrapper->op_close_agent() == -1) {
const std::string err_str = sys::StrError();
DEBUG(dbgs() << "Failed to disconnect from OProfile agent: "
<< err_str << "\n");
} else {
DEBUG(dbgs() << "Disconnected from OProfile agent.\n");
}
}
}
void OProfileJITEventListener::NotifyObjectEmitted(
const ObjectFile &Obj,
const RuntimeDyld::LoadedObjectInfo &L) {
if (!Wrapper->isAgentAvailable()) {
return;
}
OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
// Use symbol info to iterate functions in the object.
for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
SymbolRef Sym = P.first;
if (Sym.getType() != SymbolRef::ST_Function)
continue;
ErrorOr<StringRef> NameOrErr = Sym.getName();
if (NameOrErr.getError())
continue;
StringRef Name = *NameOrErr;
ErrorOr<uint64_t> AddrOrErr = Sym.getAddress();
if (AddrOrErr.getError())
continue;
uint64_t Addr = *AddrOrErr;
uint64_t Size = P.second;
if (Wrapper->op_write_native_code(Name.data(), Addr, (void *)Addr, Size) ==
-1) {
DEBUG(dbgs() << "Failed to tell OProfile about native function " << Name
<< " at [" << (void *)Addr << "-" << ((char *)Addr + Size)
<< "]\n");
continue;
}
// TODO: support line number info (similar to IntelJITEventListener.cpp)
}
DebugObjects[Obj.getData().data()] = std::move(DebugObjOwner);
}
void OProfileJITEventListener::NotifyFreeingObject(const ObjectFile &Obj) {
if (Wrapper->isAgentAvailable()) {
// If there was no agent registered when the original object was loaded then
// we won't have created a debug object for it, so bail out.
if (DebugObjects.find(Obj.getData().data()) == DebugObjects.end())
return;
const ObjectFile &DebugObj = *DebugObjects[Obj.getData().data()].getBinary();
// Use symbol info to iterate functions in the object.
for (symbol_iterator I = DebugObj.symbol_begin(),
E = DebugObj.symbol_end();
I != E; ++I) {
if (I->getType() == SymbolRef::ST_Function) {
ErrorOr<uint64_t> AddrOrErr = I->getAddress();
if (AddrOrErr.getError())
continue;
uint64_t Addr = *AddrOrErr;
if (Wrapper->op_unload_native_code(Addr) == -1) {
DEBUG(dbgs()
<< "Failed to tell OProfile about unload of native function at "
<< (void*)Addr << "\n");
continue;
}
}
}
}
DebugObjects.erase(Obj.getData().data());
}
} // anonymous namespace.
namespace llvm {
JITEventListener *JITEventListener::createOProfileJITEventListener() {
return new OProfileJITEventListener(llvm::make_unique<OProfileWrapper>());
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/MCJIT/MCJIT.cpp | //===-- MCJIT.cpp - MC-based Just-in-Time Compiler ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCJIT.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MutexGuard.h"
using namespace llvm;
void ObjectCache::anchor() {}
namespace {
static struct RegisterJIT {
RegisterJIT() { MCJIT::Register(); }
} JITRegistrator;
}
extern "C" void LLVMLinkInMCJIT() {
}
ExecutionEngine*
MCJIT::createJIT(std::unique_ptr<Module> M,
std::string *ErrorStr,
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver,
std::unique_ptr<TargetMachine> TM) {
// Try to register the program as a source of symbols to resolve against.
//
// FIXME: Don't do this here.
sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
if (!MemMgr || !Resolver) {
auto RTDyldMM = std::make_shared<SectionMemoryManager>();
if (!MemMgr)
MemMgr = RTDyldMM;
if (!Resolver)
Resolver = RTDyldMM;
}
return new MCJIT(std::move(M), std::move(TM), std::move(MemMgr),
std::move(Resolver));
}
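// Typical construction path (a sketch): clients normally reach createJIT()
// indirectly, through EngineBuilder::create() with EngineKind::JIT, after
// ensuring the MCJIT library is linked in (e.g. via LLVMLinkInMCJIT()).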
MCJIT::MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> tm,
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver)
: ExecutionEngine(std::move(M)), TM(std::move(tm)), Ctx(nullptr),
MemMgr(std::move(MemMgr)), Resolver(*this, std::move(Resolver)),
Dyld(*this->MemMgr, this->Resolver), ObjCache(nullptr) {
// FIXME: We are managing our modules, so we do not want the base class
// ExecutionEngine to manage them as well. To avoid double destruction
// of the first (and only) module added in ExecutionEngine constructor
// we remove it from EE and will destruct it ourselves.
//
  // It may make sense to move our module manager (based on SmallPtrSet) back
// into EE if the JIT and Interpreter can live with it.
// If so, additional functions: addModule, removeModule, FindFunctionNamed,
// runStaticConstructorsDestructors could be moved back to EE as well.
//
std::unique_ptr<Module> First = std::move(Modules[0]);
Modules.clear();
OwnedModules.addModule(std::move(First));
setDataLayout(TM->getDataLayout());
RegisterJITEventListener(JITEventListener::createGDBRegistrationListener());
}
MCJIT::~MCJIT() {
MutexGuard locked(lock);
Dyld.deregisterEHFrames();
for (auto &Obj : LoadedObjects)
if (Obj)
NotifyFreeingObject(*Obj);
Archives.clear();
}
void MCJIT::addModule(std::unique_ptr<Module> M) {
MutexGuard locked(lock);
OwnedModules.addModule(std::move(M));
}
bool MCJIT::removeModule(Module *M) {
MutexGuard locked(lock);
return OwnedModules.removeModule(M);
}
void MCJIT::addObjectFile(std::unique_ptr<object::ObjectFile> Obj) {
std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L = Dyld.loadObject(*Obj);
if (Dyld.hasError())
report_fatal_error(Dyld.getErrorString());
NotifyObjectEmitted(*Obj, *L);
LoadedObjects.push_back(std::move(Obj));
}
void MCJIT::addObjectFile(object::OwningBinary<object::ObjectFile> Obj) {
std::unique_ptr<object::ObjectFile> ObjFile;
std::unique_ptr<MemoryBuffer> MemBuf;
std::tie(ObjFile, MemBuf) = Obj.takeBinary();
addObjectFile(std::move(ObjFile));
Buffers.push_back(std::move(MemBuf));
}
void MCJIT::addArchive(object::OwningBinary<object::Archive> A) {
Archives.push_back(std::move(A));
}
void MCJIT::setObjectCache(ObjectCache* NewCache) {
MutexGuard locked(lock);
ObjCache = NewCache;
}
std::unique_ptr<MemoryBuffer> MCJIT::emitObject(Module *M) {
MutexGuard locked(lock);
// This must be a module which has already been added but not loaded to this
// MCJIT instance, since these conditions are tested by our caller,
// generateCodeForModule.
legacy::PassManager PM;
// The RuntimeDyld will take ownership of this shortly
SmallVector<char, 4096> ObjBufferSV;
raw_svector_ostream ObjStream(ObjBufferSV);
// Turn the machine code intermediate representation into bytes in memory
// that may be executed.
if (TM->addPassesToEmitMC(PM, Ctx, ObjStream, !getVerifyModules()))
report_fatal_error("Target does not support MC emission!");
// Initialize passes.
PM.run(*M);
// Flush the output buffer to get the generated code into memory
ObjStream.flush();
std::unique_ptr<MemoryBuffer> CompiledObjBuffer(
new ObjectMemoryBuffer(std::move(ObjBufferSV)));
// If we have an object cache, tell it about the new object.
// Note that we're using the compiled image, not the loaded image (as below).
if (ObjCache) {
// MemoryBuffer is a thin wrapper around the actual memory, so it's OK
// to create a temporary object here and delete it after the call.
MemoryBufferRef MB = CompiledObjBuffer->getMemBufferRef();
ObjCache->notifyObjectCompiled(M, MB);
}
return CompiledObjBuffer;
}
void MCJIT::generateCodeForModule(Module *M) {
// Get a thread lock to make sure we aren't trying to load multiple times
MutexGuard locked(lock);
// This must be a module which has already been added to this MCJIT instance.
assert(OwnedModules.ownsModule(M) &&
"MCJIT::generateCodeForModule: Unknown module.");
// Re-compilation is not supported
if (OwnedModules.hasModuleBeenLoaded(M))
return;
std::unique_ptr<MemoryBuffer> ObjectToLoad;
// Try to load the pre-compiled object from cache if possible
if (ObjCache)
ObjectToLoad = ObjCache->getObject(M);
M->setDataLayout(*TM->getDataLayout());
// If the cache did not contain a suitable object, compile the object
if (!ObjectToLoad) {
ObjectToLoad = emitObject(M);
assert(ObjectToLoad && "Compilation did not produce an object.");
}
// Load the object into the dynamic linker.
// MCJIT now owns the ObjectImage pointer (via its LoadedObjects list).
ErrorOr<std::unique_ptr<object::ObjectFile>> LoadedObject =
object::ObjectFile::createObjectFile(ObjectToLoad->getMemBufferRef());
std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L =
Dyld.loadObject(*LoadedObject.get());
if (Dyld.hasError())
report_fatal_error(Dyld.getErrorString());
NotifyObjectEmitted(*LoadedObject.get(), *L);
Buffers.push_back(std::move(ObjectToLoad));
LoadedObjects.push_back(std::move(*LoadedObject));
OwnedModules.markModuleAsLoaded(M);
}
void MCJIT::finalizeLoadedModules() {
MutexGuard locked(lock);
// Resolve any outstanding relocations.
Dyld.resolveRelocations();
OwnedModules.markAllLoadedModulesAsFinalized();
// Register EH frame data for any module we own which has been loaded
Dyld.registerEHFrames();
// Set page permissions.
MemMgr->finalizeMemory();
}
// FIXME: Rename this.
void MCJIT::finalizeObject() {
MutexGuard locked(lock);
// Generate code for module is going to move objects out of the 'added' list,
// so we need to copy that out before using it:
SmallVector<Module*, 16> ModsToAdd;
for (auto M : OwnedModules.added())
ModsToAdd.push_back(M);
for (auto M : ModsToAdd)
generateCodeForModule(M);
finalizeLoadedModules();
}
void MCJIT::finalizeModule(Module *M) {
MutexGuard locked(lock);
// This must be a module which has already been added to this MCJIT instance.
assert(OwnedModules.ownsModule(M) && "MCJIT::finalizeModule: Unknown module.");
// If the module hasn't been compiled, just do that.
if (!OwnedModules.hasModuleBeenLoaded(M))
generateCodeForModule(M);
finalizeLoadedModules();
}
RuntimeDyld::SymbolInfo MCJIT::findExistingSymbol(const std::string &Name) {
SmallString<128> FullName;
Mangler::getNameWithPrefix(FullName, Name, *TM->getDataLayout());
if (void *Addr = getPointerToGlobalIfAvailable(FullName))
return RuntimeDyld::SymbolInfo(static_cast<uint64_t>(
reinterpret_cast<uintptr_t>(Addr)),
JITSymbolFlags::Exported);
return Dyld.getSymbol(FullName);
}
Module *MCJIT::findModuleForSymbol(const std::string &Name,
bool CheckFunctionsOnly) {
MutexGuard locked(lock);
// If it hasn't already been generated, see if it's in one of our modules.
for (ModulePtrSet::iterator I = OwnedModules.begin_added(),
E = OwnedModules.end_added();
I != E; ++I) {
Module *M = *I;
Function *F = M->getFunction(Name);
if (F && !F->isDeclaration())
return M;
if (!CheckFunctionsOnly) {
GlobalVariable *G = M->getGlobalVariable(Name);
if (G && !G->isDeclaration())
return M;
// FIXME: Do we need to worry about global aliases?
}
}
// We didn't find the symbol in any of our modules.
return nullptr;
}
uint64_t MCJIT::getSymbolAddress(const std::string &Name,
bool CheckFunctionsOnly) {
return findSymbol(Name, CheckFunctionsOnly).getAddress();
}
RuntimeDyld::SymbolInfo MCJIT::findSymbol(const std::string &Name,
bool CheckFunctionsOnly) {
MutexGuard locked(lock);
// First, check to see if we already have this symbol.
if (auto Sym = findExistingSymbol(Name))
return Sym;
for (object::OwningBinary<object::Archive> &OB : Archives) {
object::Archive *A = OB.getBinary();
// Look for our symbols in each Archive
object::Archive::child_iterator ChildIt = A->findSym(Name);
if (ChildIt != A->child_end()) {
// FIXME: Support nested archives?
ErrorOr<std::unique_ptr<object::Binary>> ChildBinOrErr =
ChildIt->getAsBinary();
if (ChildBinOrErr.getError())
continue;
std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
if (ChildBin->isObject()) {
std::unique_ptr<object::ObjectFile> OF(
static_cast<object::ObjectFile *>(ChildBin.release()));
// This causes the object file to be loaded.
addObjectFile(std::move(OF));
// The address should be here now.
if (auto Sym = findExistingSymbol(Name))
return Sym;
}
}
}
// If it hasn't already been generated, see if it's in one of our modules.
Module *M = findModuleForSymbol(Name, CheckFunctionsOnly);
if (M) {
generateCodeForModule(M);
// Check the RuntimeDyld table again, it should be there now.
return findExistingSymbol(Name);
}
// If a LazyFunctionCreator is installed, use it to get/create the function.
// FIXME: Should we instead have a LazySymbolCreator callback?
if (LazyFunctionCreator) {
auto Addr = static_cast<uint64_t>(
reinterpret_cast<uintptr_t>(LazyFunctionCreator(Name)));
return RuntimeDyld::SymbolInfo(Addr, JITSymbolFlags::Exported);
}
return nullptr;
}
uint64_t MCJIT::getGlobalValueAddress(const std::string &Name) {
MutexGuard locked(lock);
uint64_t Result = getSymbolAddress(Name, false);
if (Result != 0)
finalizeLoadedModules();
return Result;
}
uint64_t MCJIT::getFunctionAddress(const std::string &Name) {
MutexGuard locked(lock);
uint64_t Result = getSymbolAddress(Name, true);
if (Result != 0)
finalizeLoadedModules();
return Result;
}
// Deprecated. Use getFunctionAddress instead.
void *MCJIT::getPointerToFunction(Function *F) {
MutexGuard locked(lock);
Mangler Mang;
SmallString<128> Name;
TM->getNameWithPrefix(Name, F, Mang);
if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
bool AbortOnFailure = !F->hasExternalWeakLinkage();
void *Addr = getPointerToNamedFunction(Name, AbortOnFailure);
updateGlobalMapping(F, Addr);
return Addr;
}
Module *M = F->getParent();
bool HasBeenAddedButNotLoaded = OwnedModules.hasModuleBeenAddedButNotLoaded(M);
// Make sure the relevant module has been compiled and loaded.
if (HasBeenAddedButNotLoaded)
generateCodeForModule(M);
else if (!OwnedModules.hasModuleBeenLoaded(M)) {
// If this function doesn't belong to one of our modules, we're done.
// FIXME: Asking for the pointer to a function that hasn't been registered,
// and isn't a declaration (which is handled above) should probably
// be an assertion.
return nullptr;
}
// FIXME: Should the Dyld be retaining module information? Probably not.
//
// This is the accessor for the target address, so make sure to check the
// load address of the symbol, not the local address.
return (void*)Dyld.getSymbol(Name).getAddress();
}
void MCJIT::runStaticConstructorsDestructorsInModulePtrSet(
bool isDtors, ModulePtrSet::iterator I, ModulePtrSet::iterator E) {
for (; I != E; ++I) {
ExecutionEngine::runStaticConstructorsDestructors(**I, isDtors);
}
}
void MCJIT::runStaticConstructorsDestructors(bool isDtors) {
// Execute global ctors/dtors for each module in the program.
runStaticConstructorsDestructorsInModulePtrSet(
isDtors, OwnedModules.begin_added(), OwnedModules.end_added());
runStaticConstructorsDestructorsInModulePtrSet(
isDtors, OwnedModules.begin_loaded(), OwnedModules.end_loaded());
runStaticConstructorsDestructorsInModulePtrSet(
isDtors, OwnedModules.begin_finalized(), OwnedModules.end_finalized());
}
Function *MCJIT::FindFunctionNamedInModulePtrSet(const char *FnName,
ModulePtrSet::iterator I,
ModulePtrSet::iterator E) {
for (; I != E; ++I) {
Function *F = (*I)->getFunction(FnName);
if (F && !F->isDeclaration())
return F;
}
return nullptr;
}
GlobalVariable *MCJIT::FindGlobalVariableNamedInModulePtrSet(const char *Name,
bool AllowInternal,
ModulePtrSet::iterator I,
ModulePtrSet::iterator E) {
for (; I != E; ++I) {
GlobalVariable *GV = (*I)->getGlobalVariable(Name, AllowInternal);
if (GV && !GV->isDeclaration())
return GV;
}
return nullptr;
}
Function *MCJIT::FindFunctionNamed(const char *FnName) {
Function *F = FindFunctionNamedInModulePtrSet(
FnName, OwnedModules.begin_added(), OwnedModules.end_added());
if (!F)
F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_loaded(),
OwnedModules.end_loaded());
if (!F)
F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_finalized(),
OwnedModules.end_finalized());
return F;
}
GlobalVariable *MCJIT::FindGlobalVariableNamed(const char *Name, bool AllowInternal) {
GlobalVariable *GV = FindGlobalVariableNamedInModulePtrSet(
Name, AllowInternal, OwnedModules.begin_added(), OwnedModules.end_added());
if (!GV)
GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_loaded(),
OwnedModules.end_loaded());
if (!GV)
GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_finalized(),
OwnedModules.end_finalized());
return GV;
}
GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
assert(F && "Function *F was null at entry to run()");
void *FPtr = getPointerToFunction(F);
assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
FunctionType *FTy = F->getFunctionType();
Type *RetTy = FTy->getReturnType();
assert((FTy->getNumParams() == ArgValues.size() ||
(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
"Wrong number of arguments passed into function!");
assert(FTy->getNumParams() == ArgValues.size() &&
"This doesn't support passing arguments through varargs (yet)!");
// Handle some common cases first. These cases correspond to common `main'
// prototypes.
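  // (Case 3 below matches "int main(int argc, char **argv, char **envp)",
  // case 2 matches "int main(int argc, char **argv)", and case 1 matches
  // "int main(int argc)".)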
if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
switch (ArgValues.size()) {
case 3:
if (FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isPointerTy() &&
FTy->getParamType(2)->isPointerTy()) {
int (*PF)(int, char **, const char **) =
(int(*)(int, char **, const char **))(intptr_t)FPtr;
// Call the function.
GenericValue rv;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
(char **)GVTOP(ArgValues[1]),
(const char **)GVTOP(ArgValues[2])));
return rv;
}
break;
case 2:
if (FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isPointerTy()) {
int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;
// Call the function.
GenericValue rv;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
(char **)GVTOP(ArgValues[1])));
return rv;
}
break;
case 1:
if (FTy->getNumParams() == 1 &&
FTy->getParamType(0)->isIntegerTy(32)) {
GenericValue rv;
int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
return rv;
}
break;
}
}
// Handle cases where no arguments are passed first.
if (ArgValues.empty()) {
GenericValue rv;
switch (RetTy->getTypeID()) {
default: llvm_unreachable("Unknown return type for function call!");
case Type::IntegerTyID: {
unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
if (BitWidth == 1)
rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
else if (BitWidth <= 8)
rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
else if (BitWidth <= 16)
rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
else if (BitWidth <= 32)
rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
else if (BitWidth <= 64)
rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
else
llvm_unreachable("Integer types > 64 bits not supported");
return rv;
}
case Type::VoidTyID:
rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
return rv;
case Type::FloatTyID:
rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
return rv;
case Type::DoubleTyID:
rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
return rv;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
llvm_unreachable("long double not supported yet");
case Type::PointerTyID:
return PTOGV(((void*(*)())(intptr_t)FPtr)());
}
}
llvm_unreachable("Full-featured argument passing not supported yet!");
}
void *MCJIT::getPointerToNamedFunction(StringRef Name, bool AbortOnFailure) {
if (!isSymbolSearchingDisabled()) {
void *ptr =
reinterpret_cast<void*>(
static_cast<uintptr_t>(Resolver.findSymbol(Name).getAddress()));
if (ptr)
return ptr;
}
/// If a LazyFunctionCreator is installed, use it to get/create the function.
if (LazyFunctionCreator)
if (void *RP = LazyFunctionCreator(Name))
return RP;
if (AbortOnFailure) {
report_fatal_error("Program used external function '"+Name+
"' which could not be resolved!");
}
return nullptr;
}
void MCJIT::RegisterJITEventListener(JITEventListener *L) {
if (!L)
return;
MutexGuard locked(lock);
EventListeners.push_back(L);
}
void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
if (!L)
return;
MutexGuard locked(lock);
auto I = std::find(EventListeners.rbegin(), EventListeners.rend(), L);
if (I != EventListeners.rend()) {
std::swap(*I, EventListeners.back());
EventListeners.pop_back();
}
}
void MCJIT::NotifyObjectEmitted(const object::ObjectFile& Obj,
const RuntimeDyld::LoadedObjectInfo &L) {
MutexGuard locked(lock);
MemMgr->notifyObjectLoaded(this, Obj);
for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
EventListeners[I]->NotifyObjectEmitted(Obj, L);
}
}
void MCJIT::NotifyFreeingObject(const object::ObjectFile& Obj) {
MutexGuard locked(lock);
for (JITEventListener *L : EventListeners)
L->NotifyFreeingObject(Obj);
}
RuntimeDyld::SymbolInfo
LinkingSymbolResolver::findSymbol(const std::string &Name) {
auto Result = ParentEngine.findSymbol(Name, false);
  // If the symbol wasn't found and it begins with an underscore, try again
  // without the underscore.
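  // (For example, Mach-O targets prefix C-level symbols with '_', so a lookup
  // for "_foo" may need to fall back to the unprefixed IR name "foo".)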
if (!Result && Name[0] == '_')
Result = ParentEngine.findSymbol(Name.substr(1), false);
if (Result)
return Result;
if (ParentEngine.isSymbolSearchingDisabled())
return nullptr;
return ClientResolver->findSymbol(Name);
}
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/MCJIT/MCJIT.h | //===-- MCJIT.h - Class definition for the MCJIT ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
#define LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/Module.h"
namespace llvm {
class MCJIT;
// This is a helper class that the MCJIT execution engine uses for linking
// functions across modules that it owns. It aggregates the memory manager
// that is passed in to the MCJIT constructor and defers most functionality
// to that object.
class LinkingSymbolResolver : public RuntimeDyld::SymbolResolver {
public:
LinkingSymbolResolver(MCJIT &Parent,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver)
: ParentEngine(Parent), ClientResolver(std::move(Resolver)) {}
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) override;
// MCJIT doesn't support logical dylibs.
RuntimeDyld::SymbolInfo
findSymbolInLogicalDylib(const std::string &Name) override {
return nullptr;
}
private:
MCJIT &ParentEngine;
std::shared_ptr<RuntimeDyld::SymbolResolver> ClientResolver;
};
// About Module states: added->loaded->finalized.
//
// The purpose of the "added" state is having modules in standby. (added=known
// but not compiled). The idea is that you can add a module to provide function
// definitions but if nothing in that module is referenced by a module in which
// a function is executed (note the wording here because it's not exactly the
// ideal case) then the module never gets compiled. This is sort of lazy
// compilation.
//
// The purpose of the "loaded" state (loaded=compiled and required sections
// copied into local memory but not yet ready for execution) is to have an
// intermediate state wherein clients can remap the addresses of sections, using
// MCJIT::mapSectionAddress, (in preparation for later copying to a new location
// or an external process) before relocations and page permissions are applied.
//
// It might not be obvious at first glance, but the "remote-mcjit" case in the
// lli tool does this. In that case, the intermediate action is taken by the
// RemoteMemoryManager in response to the notifyObjectLoaded function being
// called.
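//
// A minimal lifecycle sketch in terms of the methods declared below
// (hypothetical client code, not part of this header):
//
//   EE->addModule(std::move(M));     // M is "added": known, not yet compiled
//   EE->generateCodeForModule(MP);   // M is "loaded": compiled into memory
//   EE->finalizeLoadedModules();     // M is "finalized": relocated, runnable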
class MCJIT : public ExecutionEngine {
MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> tm,
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver);
typedef llvm::SmallPtrSet<Module *, 4> ModulePtrSet;
class OwningModuleContainer {
public:
OwningModuleContainer() {
}
~OwningModuleContainer() {
freeModulePtrSet(AddedModules);
freeModulePtrSet(LoadedModules);
freeModulePtrSet(FinalizedModules);
}
ModulePtrSet::iterator begin_added() { return AddedModules.begin(); }
ModulePtrSet::iterator end_added() { return AddedModules.end(); }
iterator_range<ModulePtrSet::iterator> added() {
return iterator_range<ModulePtrSet::iterator>(begin_added(), end_added());
}
ModulePtrSet::iterator begin_loaded() { return LoadedModules.begin(); }
ModulePtrSet::iterator end_loaded() { return LoadedModules.end(); }
ModulePtrSet::iterator begin_finalized() { return FinalizedModules.begin(); }
ModulePtrSet::iterator end_finalized() { return FinalizedModules.end(); }
void addModule(std::unique_ptr<Module> M) {
AddedModules.insert(M.release());
}
bool removeModule(Module *M) {
return AddedModules.erase(M) || LoadedModules.erase(M) ||
FinalizedModules.erase(M);
}
bool hasModuleBeenAddedButNotLoaded(Module *M) {
return AddedModules.count(M) != 0;
}
bool hasModuleBeenLoaded(Module *M) {
// If the module is in either the "loaded" or "finalized" sections it
// has been loaded.
return (LoadedModules.count(M) != 0 ) || (FinalizedModules.count(M) != 0);
}
bool hasModuleBeenFinalized(Module *M) {
return FinalizedModules.count(M) != 0;
}
bool ownsModule(Module* M) {
return (AddedModules.count(M) != 0) || (LoadedModules.count(M) != 0) ||
(FinalizedModules.count(M) != 0);
}
void markModuleAsLoaded(Module *M) {
// This checks against logic errors in the MCJIT implementation.
// This function should never be called with either a Module that MCJIT
// does not own or a Module that has already been loaded and/or finalized.
assert(AddedModules.count(M) &&
"markModuleAsLoaded: Module not found in AddedModules");
// Remove the module from the "Added" set.
AddedModules.erase(M);
// Add the Module to the "Loaded" set.
LoadedModules.insert(M);
}
void markModuleAsFinalized(Module *M) {
// This checks against logic errors in the MCJIT implementation.
// This function should never be called with either a Module that MCJIT
// does not own, a Module that has not been loaded or a Module that has
// already been finalized.
assert(LoadedModules.count(M) &&
"markModuleAsFinalized: Module not found in LoadedModules");
// Remove the module from the "Loaded" section of the list.
LoadedModules.erase(M);
// Add the Module to the "Finalized" section of the list by inserting it
// before the 'end' iterator.
FinalizedModules.insert(M);
}
void markAllLoadedModulesAsFinalized() {
for (ModulePtrSet::iterator I = LoadedModules.begin(),
E = LoadedModules.end();
I != E; ++I) {
Module *M = *I;
FinalizedModules.insert(M);
}
LoadedModules.clear();
}
private:
ModulePtrSet AddedModules;
ModulePtrSet LoadedModules;
ModulePtrSet FinalizedModules;
void freeModulePtrSet(ModulePtrSet& MPS) {
// Go through the module set and delete everything.
for (ModulePtrSet::iterator I = MPS.begin(), E = MPS.end(); I != E; ++I) {
Module *M = *I;
delete M;
}
MPS.clear();
}
};
std::unique_ptr<TargetMachine> TM;
MCContext *Ctx;
std::shared_ptr<MCJITMemoryManager> MemMgr;
LinkingSymbolResolver Resolver;
RuntimeDyld Dyld;
std::vector<JITEventListener*> EventListeners;
OwningModuleContainer OwnedModules;
SmallVector<object::OwningBinary<object::Archive>, 2> Archives;
SmallVector<std::unique_ptr<MemoryBuffer>, 2> Buffers;
SmallVector<std::unique_ptr<object::ObjectFile>, 2> LoadedObjects;
// An optional ObjectCache to be notified of compiled objects and used to
// perform lookup of pre-compiled code to avoid re-compilation.
ObjectCache *ObjCache;
Function *FindFunctionNamedInModulePtrSet(const char *FnName,
ModulePtrSet::iterator I,
ModulePtrSet::iterator E);
GlobalVariable *FindGlobalVariableNamedInModulePtrSet(const char *Name,
bool AllowInternal,
ModulePtrSet::iterator I,
ModulePtrSet::iterator E);
void runStaticConstructorsDestructorsInModulePtrSet(bool isDtors,
ModulePtrSet::iterator I,
ModulePtrSet::iterator E);
public:
~MCJIT() override;
/// @name ExecutionEngine interface implementation
/// @{
void addModule(std::unique_ptr<Module> M) override;
void addObjectFile(std::unique_ptr<object::ObjectFile> O) override;
void addObjectFile(object::OwningBinary<object::ObjectFile> O) override;
void addArchive(object::OwningBinary<object::Archive> O) override;
bool removeModule(Module *M) override;
/// FindFunctionNamed - Search all of the active modules to find the function that
  /// defines FnName. This is a very slow operation and shouldn't be used for
/// general code.
virtual Function *FindFunctionNamed(const char *FnName) override;
/// FindGlobalVariableNamed - Search all of the active modules to find the global variable
  /// that defines Name. This is a very slow operation and shouldn't be used for
/// general code.
virtual GlobalVariable *FindGlobalVariableNamed(const char *Name, bool AllowInternal = false) override;
/// Sets the object manager that MCJIT should use to avoid compilation.
void setObjectCache(ObjectCache *manager) override;
void setProcessAllSections(bool ProcessAllSections) override {
Dyld.setProcessAllSections(ProcessAllSections);
}
void generateCodeForModule(Module *M) override;
/// finalizeObject - ensure the module is fully processed and is usable.
///
/// It is the user-level function for completing the process of making the
/// object usable for execution. It should be called after sections within an
/// object have been relocated using mapSectionAddress. When this method is
/// called the MCJIT execution engine will reapply relocations for a loaded
/// object.
  /// It is OK to finalize a set of modules, add more modules, and finalize again.
// FIXME: Do we really need both of these?
void finalizeObject() override;
virtual void finalizeModule(Module *);
void finalizeLoadedModules();
/// runStaticConstructorsDestructors - This method is used to execute all of
/// the static constructors or destructors for a program.
///
/// \param isDtors - Run the destructors instead of constructors.
void runStaticConstructorsDestructors(bool isDtors) override;
void *getPointerToFunction(Function *F) override;
GenericValue runFunction(Function *F,
ArrayRef<GenericValue> ArgValues) override;
/// getPointerToNamedFunction - This method returns the address of the
/// specified function by using the dlsym function call. As such it is only
/// useful for resolving library symbols, not code generated symbols.
///
/// If AbortOnFailure is false and no function with the given name is
/// found, this function silently returns a null pointer. Otherwise,
/// it prints a message to stderr and aborts.
///
void *getPointerToNamedFunction(StringRef Name,
bool AbortOnFailure = true) override;
/// mapSectionAddress - map a section to its target address space value.
/// Map the address of a JIT section as returned from the memory manager
/// to the address in the target process as the running code will see it.
/// This is the address which will be used for relocation resolution.
void mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) override {
Dyld.mapSectionAddress(LocalAddress, TargetAddress);
}
void RegisterJITEventListener(JITEventListener *L) override;
void UnregisterJITEventListener(JITEventListener *L) override;
  // If successful, these functions will implicitly finalize all loaded objects.
// To get a function address within MCJIT without causing a finalize, use
// getSymbolAddress.
uint64_t getGlobalValueAddress(const std::string &Name) override;
uint64_t getFunctionAddress(const std::string &Name) override;
TargetMachine *getTargetMachine() override { return TM.get(); }
/// @}
/// @name (Private) Registration Interfaces
/// @{
static void Register() {
MCJITCtor = createJIT;
}
static ExecutionEngine*
createJIT(std::unique_ptr<Module> M,
std::string *ErrorStr,
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver,
std::unique_ptr<TargetMachine> TM);
// @}
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name,
bool CheckFunctionsOnly);
// DEPRECATED - Please use findSymbol instead.
// This is not directly exposed via the ExecutionEngine API, but it is
// used by the LinkingMemoryManager.
uint64_t getSymbolAddress(const std::string &Name,
bool CheckFunctionsOnly);
protected:
/// emitObject -- Generate a JITed object in memory from the specified module
/// Currently, MCJIT only supports a single module and the module passed to
/// this function call is expected to be the contained module. The module
/// is passed as a parameter here to prepare for multiple module support in
/// the future.
std::unique_ptr<MemoryBuffer> emitObject(Module *M);
void NotifyObjectEmitted(const object::ObjectFile& Obj,
const RuntimeDyld::LoadedObjectInfo &L);
void NotifyFreeingObject(const object::ObjectFile& Obj);
RuntimeDyld::SymbolInfo findExistingSymbol(const std::string &Name);
Module *findModuleForSymbol(const std::string &Name,
bool CheckFunctionsOnly);
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/MCJIT/CMakeLists.txt | add_llvm_library(LLVMMCJIT
MCJIT.cpp
DEPENDS
intrinsics_gen
)
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/MCJIT/LLVMBuild.txt | ;===- ./lib/ExecutionEngine/MCJIT/LLVMBuild.txt ----------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = MCJIT
parent = ExecutionEngine
required_libraries = Core ExecutionEngine Object RuntimeDyld Support Target
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/MCJIT/ObjectBuffer.h | //===--- ObjectBuffer.h - Utility class to wrap object memory ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares a wrapper class to hold the memory into which an
// object will be generated.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_OBJECTBUFFER_H
#define LLVM_EXECUTIONENGINE_OBJECTBUFFER_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
class ObjectMemoryBuffer : public MemoryBuffer {
public:
template <unsigned N>
ObjectMemoryBuffer(SmallVector<char, N> SV)
: SV(SV), BufferName("<in-memory object>") {
init(this->SV.begin(), this->SV.end(), false);
}
template <unsigned N>
ObjectMemoryBuffer(SmallVector<char, N> SV, StringRef Name)
    : SV(std::move(SV)), BufferName(Name) {
init(this->SV.begin(), this->SV.end(), false);
}
const char* getBufferIdentifier() const override { return BufferName.c_str(); }
BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
private:
SmallVector<char, 4096> SV;
std::string BufferName;
};
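// A minimal usage sketch (hypothetical): stream generated object bytes into a
// SmallVector via raw_svector_ostream, then wrap them for consumers expecting
// a MemoryBuffer:
//
//   SmallVector<char, 0> Bytes;
//   raw_svector_ostream OS(Bytes);
//   // ... emit object code into OS, then OS.flush() ...
//   std::unique_ptr<MemoryBuffer> MB(
//       new ObjectMemoryBuffer(std::move(Bytes), "<my object>"));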
} // namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/IndirectionUtils.cpp | //===---- IndirectionUtils.cpp - Utilities for call indirection in Orc ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <set>
#include <sstream>
namespace llvm {
namespace orc {
Constant* createIRTypedAddress(FunctionType &FT, TargetAddress Addr) {
Constant *AddrIntVal =
ConstantInt::get(Type::getInt64Ty(FT.getContext()), Addr);
Constant *AddrPtrVal =
ConstantExpr::getCast(Instruction::IntToPtr, AddrIntVal,
PointerType::get(&FT, 0));
return AddrPtrVal;
}
GlobalVariable* createImplPointer(PointerType &PT, Module &M,
const Twine &Name, Constant *Initializer) {
auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
Initializer, Name, nullptr,
GlobalValue::NotThreadLocal, 0, true);
IP->setVisibility(GlobalValue::HiddenVisibility);
return IP;
}
void makeStub(Function &F, GlobalVariable &ImplPointer) {
assert(F.isDeclaration() && "Can't turn a definition into a stub.");
assert(F.getParent() && "Function isn't in a module.");
Module &M = *F.getParent();
BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
IRBuilder<> Builder(EntryBlock);
LoadInst *ImplAddr = Builder.CreateLoad(&ImplPointer);
std::vector<Value*> CallArgs;
for (auto &A : F.args())
CallArgs.push_back(&A);
CallInst *Call = Builder.CreateCall(ImplAddr, CallArgs);
Call->setTailCall();
Call->setAttributes(F.getAttributes());
if (F.getReturnType()->isVoidTy())
Builder.CreateRetVoid();
else
Builder.CreateRet(Call);
}
// Utility class for renaming global values and functions during partitioning.
class GlobalRenamer {
public:
static bool needsRenaming(const Value &New) {
if (!New.hasName() || New.getName().startswith("\01L"))
return true;
return false;
}
const std::string& getRename(const Value &Orig) {
// See if we have a name for this global.
{
auto I = Names.find(&Orig);
if (I != Names.end())
return I->second;
}
// Nope. Create a new one.
// FIXME: Use a more robust uniquing scheme. (This may blow up if the user
// writes a "__orc_anon[[:digit:]]* method).
unsigned ID = Names.size();
std::ostringstream NameStream;
NameStream << "__orc_anon" << ID++;
auto I = Names.insert(std::make_pair(&Orig, NameStream.str()));
return I.first->second;
}
private:
DenseMap<const Value*, std::string> Names;
};
static void raiseVisibilityOnValue(GlobalValue &V, GlobalRenamer &R) {
if (V.hasLocalLinkage()) {
if (R.needsRenaming(V))
V.setName(R.getRename(V));
V.setLinkage(GlobalValue::ExternalLinkage);
V.setVisibility(GlobalValue::HiddenVisibility);
}
V.setUnnamedAddr(false);
assert(!R.needsRenaming(V) && "Invalid global name.");
}
void makeAllSymbolsExternallyAccessible(Module &M) {
GlobalRenamer Renamer;
for (auto &F : M)
raiseVisibilityOnValue(F, Renamer);
for (auto &GV : M.globals())
raiseVisibilityOnValue(GV, Renamer);
}
Function* cloneFunctionDecl(Module &Dst, const Function &F,
ValueToValueMapTy *VMap) {
assert(F.getParent() != &Dst && "Can't copy decl over existing function.");
Function *NewF =
Function::Create(cast<FunctionType>(F.getType()->getElementType()),
F.getLinkage(), F.getName(), &Dst);
NewF->copyAttributesFrom(&F);
if (VMap) {
(*VMap)[&F] = NewF;
auto NewArgI = NewF->arg_begin();
for (auto ArgI = F.arg_begin(), ArgE = F.arg_end(); ArgI != ArgE;
++ArgI, ++NewArgI)
(*VMap)[ArgI] = NewArgI;
}
return NewF;
}
void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
ValueMaterializer *Materializer,
Function *NewF) {
assert(!OrigF.isDeclaration() && "Nothing to move");
if (!NewF)
NewF = cast<Function>(VMap[&OrigF]);
else
assert(VMap[&OrigF] == NewF && "Incorrect function mapping in VMap.");
assert(NewF && "Function mapping missing from VMap.");
assert(NewF->getParent() != OrigF.getParent() &&
"moveFunctionBody should only be used to move bodies between "
"modules.");
SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns,
"", nullptr, nullptr, Materializer);
OrigF.deleteBody();
}
GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
ValueToValueMapTy *VMap) {
assert(GV.getParent() != &Dst && "Can't copy decl over existing global var.");
GlobalVariable *NewGV = new GlobalVariable(
Dst, GV.getType()->getElementType(), GV.isConstant(),
GV.getLinkage(), nullptr, GV.getName(), nullptr,
GV.getThreadLocalMode(), GV.getType()->getAddressSpace());
NewGV->copyAttributesFrom(&GV);
if (VMap)
(*VMap)[&GV] = NewGV;
return NewGV;
}
void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
ValueToValueMapTy &VMap,
ValueMaterializer *Materializer,
GlobalVariable *NewGV) {
assert(OrigGV.hasInitializer() && "Nothing to move");
if (!NewGV)
NewGV = cast<GlobalVariable>(VMap[&OrigGV]);
else
assert(VMap[&OrigGV] == NewGV &&
"Incorrect global variable mapping in VMap.");
assert(NewGV->getParent() != OrigGV.getParent() &&
"moveGlobalVariable should only be used to move initializers between "
"modules");
NewGV->setInitializer(MapValue(OrigGV.getInitializer(), VMap, RF_None,
nullptr, Materializer));
}
} // End namespace orc.
} // End namespace llvm.
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/OrcMCJITReplacement.cpp | //===-------- OrcMCJITReplacement.cpp - Orc-based MCJIT replacement -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "OrcMCJITReplacement.h"
#include "llvm/ExecutionEngine/GenericValue.h"
namespace {
static struct RegisterJIT {
RegisterJIT() { llvm::orc::OrcMCJITReplacement::Register(); }
} JITRegistrator;
}
extern "C" void LLVMLinkInOrcMCJITReplacement() {}
namespace llvm {
namespace orc {
GenericValue
OrcMCJITReplacement::runFunction(Function *F,
ArrayRef<GenericValue> ArgValues) {
assert(F && "Function *F was null at entry to run()");
void *FPtr = getPointerToFunction(F);
assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
FunctionType *FTy = F->getFunctionType();
Type *RetTy = FTy->getReturnType();
assert((FTy->getNumParams() == ArgValues.size() ||
(FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
"Wrong number of arguments passed into function!");
assert(FTy->getNumParams() == ArgValues.size() &&
"This doesn't support passing arguments through varargs (yet)!");
// Handle some common cases first. These cases correspond to common `main'
// prototypes.
if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
switch (ArgValues.size()) {
case 3:
if (FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isPointerTy() &&
FTy->getParamType(2)->isPointerTy()) {
int (*PF)(int, char **, const char **) =
(int (*)(int, char **, const char **))(intptr_t)FPtr;
// Call the function.
GenericValue rv;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
(char **)GVTOP(ArgValues[1]),
(const char **)GVTOP(ArgValues[2])));
return rv;
}
break;
case 2:
if (FTy->getParamType(0)->isIntegerTy(32) &&
FTy->getParamType(1)->isPointerTy()) {
int (*PF)(int, char **) = (int (*)(int, char **))(intptr_t)FPtr;
// Call the function.
GenericValue rv;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
(char **)GVTOP(ArgValues[1])));
return rv;
}
break;
case 1:
if (FTy->getNumParams() == 1 && FTy->getParamType(0)->isIntegerTy(32)) {
GenericValue rv;
int (*PF)(int) = (int (*)(int))(intptr_t)FPtr;
rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
return rv;
}
break;
}
}
// Handle cases where no arguments are passed first.
if (ArgValues.empty()) {
GenericValue rv;
switch (RetTy->getTypeID()) {
default:
llvm_unreachable("Unknown return type for function call!");
case Type::IntegerTyID: {
unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
if (BitWidth == 1)
rv.IntVal = APInt(BitWidth, ((bool (*)())(intptr_t)FPtr)());
else if (BitWidth <= 8)
rv.IntVal = APInt(BitWidth, ((char (*)())(intptr_t)FPtr)());
else if (BitWidth <= 16)
rv.IntVal = APInt(BitWidth, ((short (*)())(intptr_t)FPtr)());
else if (BitWidth <= 32)
rv.IntVal = APInt(BitWidth, ((int (*)())(intptr_t)FPtr)());
else if (BitWidth <= 64)
rv.IntVal = APInt(BitWidth, ((int64_t (*)())(intptr_t)FPtr)());
else
llvm_unreachable("Integer types > 64 bits not supported");
return rv;
}
case Type::VoidTyID:
rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)());
return rv;
case Type::FloatTyID:
rv.FloatVal = ((float (*)())(intptr_t)FPtr)();
return rv;
case Type::DoubleTyID:
rv.DoubleVal = ((double (*)())(intptr_t)FPtr)();
return rv;
case Type::X86_FP80TyID:
case Type::FP128TyID:
case Type::PPC_FP128TyID:
llvm_unreachable("long double not supported yet");
case Type::PointerTyID:
return PTOGV(((void *(*)())(intptr_t)FPtr)());
}
}
llvm_unreachable("Full-featured argument passing not supported yet!");
}
} // End namespace orc.
} // End namespace llvm.
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/CMakeLists.txt | add_llvm_library(LLVMOrcJIT
ExecutionUtils.cpp
IndirectionUtils.cpp
NullResolver.cpp
OrcMCJITReplacement.cpp
OrcTargetSupport.cpp
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/ExecutionEngine/Orc
DEPENDS
intrinsics_gen
)
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/LLVMBuild.txt | ;===- ./lib/ExecutionEngine/MCJIT/LLVMBuild.txt ----------------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = OrcJIT
parent = ExecutionEngine
required_libraries = Core ExecutionEngine Object RuntimeDyld Support TransformUtils
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/OrcTargetSupport.cpp |
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/Orc/OrcTargetSupport.h"
#include <array>
using namespace llvm::orc;
namespace {
uint64_t executeCompileCallback(JITCompileCallbackManagerBase *JCBM,
TargetAddress CallbackID) {
return JCBM->executeCompileCallback(CallbackID);
}
}
namespace llvm {
namespace orc {
const char* OrcX86_64::ResolverBlockName = "orc_resolver_block";
void OrcX86_64::insertResolverBlock(
Module &M, JITCompileCallbackManagerBase &JCBM) {
// Trampoline code-sequence length, used to get trampoline address from return
// address.
const unsigned X86_64_TrampolineLength = 6;
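  // (Each trampoline emitted by insertCompileCallbackTrampolines below is one
  // "callq *Lorc_resolve_block_addr(%rip)": a 2-byte opcode/ModRM pair plus a
  // 4-byte RIP-relative displacement, i.e. 6 bytes, so subtracting this length
  // from the return address recovers the trampoline's own address.)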
// List of x86-64 GPRs to save. Note - RBP saved separately below.
std::array<const char *, 14> GPRs = {{
"rax", "rbx", "rcx", "rdx",
"rsi", "rdi", "r8", "r9",
"r10", "r11", "r12", "r13",
"r14", "r15"
}};
// Address of the executeCompileCallback function.
uint64_t CallbackAddr =
static_cast<uint64_t>(
reinterpret_cast<uintptr_t>(executeCompileCallback));
std::ostringstream AsmStream;
Triple TT(M.getTargetTriple());
// Switch to text section.
if (TT.getOS() == Triple::Darwin)
AsmStream << ".section __TEXT,__text,regular,pure_instructions\n"
<< ".align 4, 0x90\n";
else
AsmStream << ".text\n"
<< ".align 16, 0x90\n";
// Bake in a pointer to the callback manager immediately before the
// start of the resolver function.
AsmStream << "jit_callback_manager_addr:\n"
<< " .quad " << &JCBM << "\n";
// Start the resolver function.
AsmStream << ResolverBlockName << ":\n"
<< " pushq %rbp\n"
<< " movq %rsp, %rbp\n";
// Store the GPRs.
for (const auto &GPR : GPRs)
AsmStream << " pushq %" << GPR << "\n";
// Store floating-point state with FXSAVE.
// Note: We need to keep the stack 16-byte aligned, so if we've emitted an odd
// number of 64-bit pushes so far (GPRs.size() plus 1 for RBP) then add
// an extra 64 bits of padding to the FXSave area.
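  // Worked example for this file: GPRs.size() == 14, plus 1 for RBP, gives 15
  // pushes (odd), so Padding == 8 and FXSaveSize == 512 + 8 == 520.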
unsigned Padding = (GPRs.size() + 1) % 2 ? 8 : 0;
unsigned FXSaveSize = 512 + Padding;
AsmStream << " subq $" << FXSaveSize << ", %rsp\n"
<< " fxsave64 (%rsp)\n"
// Load callback manager address, compute trampoline address, call JIT.
<< " lea jit_callback_manager_addr(%rip), %rdi\n"
<< " movq (%rdi), %rdi\n"
<< " movq 0x8(%rbp), %rsi\n"
<< " subq $" << X86_64_TrampolineLength << ", %rsi\n"
<< " movabsq $" << CallbackAddr << ", %rax\n"
<< " callq *%rax\n"
// Replace the return to the trampoline with the return address of the
// compiled function body.
<< " movq %rax, 0x8(%rbp)\n"
// Restore the floating point state.
<< " fxrstor64 (%rsp)\n"
<< " addq $" << FXSaveSize << ", %rsp\n";
for (const auto &GPR : make_range(GPRs.rbegin(), GPRs.rend()))
AsmStream << " popq %" << GPR << "\n";
// Restore original RBP and return to compiled function body.
AsmStream << " popq %rbp\n"
<< " retq\n";
M.appendModuleInlineAsm(AsmStream.str());
}
OrcX86_64::LabelNameFtor
OrcX86_64::insertCompileCallbackTrampolines(Module &M,
TargetAddress ResolverBlockAddr,
unsigned NumCalls,
unsigned StartIndex) {
const char *ResolverBlockPtrName = "Lorc_resolve_block_addr";
std::ostringstream AsmStream;
Triple TT(M.getTargetTriple());
if (TT.getOS() == Triple::Darwin)
AsmStream << ".section __TEXT,__text,regular,pure_instructions\n"
<< ".align 4, 0x90\n";
else
AsmStream << ".text\n"
<< ".align 16, 0x90\n";
AsmStream << ResolverBlockPtrName << ":\n"
<< " .quad " << ResolverBlockAddr << "\n";
auto GetLabelName =
[=](unsigned I) {
std::ostringstream LabelStream;
LabelStream << "orc_jcc_" << (StartIndex + I);
return LabelStream.str();
};
for (unsigned I = 0; I < NumCalls; ++I)
AsmStream << GetLabelName(I) << ":\n"
<< " callq *" << ResolverBlockPtrName << "(%rip)\n";
M.appendModuleInlineAsm(AsmStream.str());
return GetLabelName;
}
} // End namespace orc.
} // End namespace llvm.
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/OrcMCJITReplacement.h | //===---- OrcMCJITReplacement.h - Orc based MCJIT replacement ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Orc based MCJIT replacement.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
#define LLVM_LIB_EXECUTIONENGINE_ORC_ORCMCJITREPLACEMENT_H
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
#include "llvm/ExecutionEngine/Orc/LazyEmittingLayer.h"
#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
#include "llvm/Object/Archive.h"
namespace llvm {
namespace orc {
class OrcMCJITReplacement : public ExecutionEngine {
// OrcMCJITReplacement needs to do a little extra book-keeping to ensure that
// Orc's automatic finalization doesn't kick in earlier than MCJIT clients are
// expecting - see finalizeMemory.
class MCJITReplacementMemMgr : public MCJITMemoryManager {
public:
MCJITReplacementMemMgr(OrcMCJITReplacement &M,
std::shared_ptr<MCJITMemoryManager> ClientMM)
: M(M), ClientMM(std::move(ClientMM)) {}
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,
StringRef SectionName) override {
uint8_t *Addr =
ClientMM->allocateCodeSection(Size, Alignment, SectionID,
SectionName);
M.SectionsAllocatedSinceLastLoad.insert(Addr);
return Addr;
}
uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID, StringRef SectionName,
bool IsReadOnly) override {
uint8_t *Addr = ClientMM->allocateDataSection(Size, Alignment, SectionID,
SectionName, IsReadOnly);
M.SectionsAllocatedSinceLastLoad.insert(Addr);
return Addr;
}
void reserveAllocationSpace(uintptr_t CodeSize, uintptr_t DataSizeRO,
uintptr_t DataSizeRW) override {
return ClientMM->reserveAllocationSpace(CodeSize, DataSizeRO,
DataSizeRW);
}
bool needsToReserveAllocationSpace() override {
return ClientMM->needsToReserveAllocationSpace();
}
void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
size_t Size) override {
return ClientMM->registerEHFrames(Addr, LoadAddr, Size);
}
void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr,
size_t Size) override {
return ClientMM->deregisterEHFrames(Addr, LoadAddr, Size);
}
void notifyObjectLoaded(ExecutionEngine *EE,
const object::ObjectFile &O) override {
return ClientMM->notifyObjectLoaded(EE, O);
}
bool finalizeMemory(std::string *ErrMsg = nullptr) override {
// Each set of objects loaded will be finalized exactly once, but since
// symbol lookup during relocation may recursively trigger the
// loading/relocation of other modules, and since we're forwarding all
// finalizeMemory calls to a single underlying memory manager, we need to
// defer forwarding the call on until all necessary objects have been
// loaded. Otherwise, during the relocation of a leaf object, we will end
// up finalizing memory, causing a crash further up the stack when we
// attempt to apply relocations to finalized memory.
// To avoid finalizing too early, look at how many objects have been
// loaded but not yet finalized. This is a bit of a hack that relies on
// the fact that we're lazily emitting object files: The only way you can
// get more than one set of objects loaded but not yet finalized is if
// they were loaded during relocation of another set.
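      // Concrete illustration (hypothetical scenario): if relocating module A
      // pulls in module B, UnfinalizedSections holds entries for both A and B
      // while B loads, so the forwarding below is deferred until only the
      // outermost set remains unfinalized.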
if (M.UnfinalizedSections.size() == 1)
return ClientMM->finalizeMemory(ErrMsg);
return false;
}
private:
OrcMCJITReplacement &M;
std::shared_ptr<MCJITMemoryManager> ClientMM;
};
class LinkingResolver : public RuntimeDyld::SymbolResolver {
public:
LinkingResolver(OrcMCJITReplacement &M) : M(M) {}
RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) override {
return M.findMangledSymbol(Name);
}
RuntimeDyld::SymbolInfo
findSymbolInLogicalDylib(const std::string &Name) override {
return M.ClientResolver->findSymbolInLogicalDylib(Name);
}
private:
OrcMCJITReplacement &M;
};
private:
static ExecutionEngine *
createOrcMCJITReplacement(std::string *ErrorMsg,
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver,
std::unique_ptr<TargetMachine> TM) {
return new OrcMCJITReplacement(std::move(MemMgr), std::move(Resolver),
std::move(TM));
}
public:
static void Register() {
OrcMCJITReplacementCtor = createOrcMCJITReplacement;
}
OrcMCJITReplacement(
std::shared_ptr<MCJITMemoryManager> MemMgr,
std::shared_ptr<RuntimeDyld::SymbolResolver> ClientResolver,
std::unique_ptr<TargetMachine> TM)
: TM(std::move(TM)), MemMgr(*this, std::move(MemMgr)),
Resolver(*this), ClientResolver(std::move(ClientResolver)),
NotifyObjectLoaded(*this), NotifyFinalized(*this),
ObjectLayer(NotifyObjectLoaded, NotifyFinalized),
CompileLayer(ObjectLayer, SimpleCompiler(*this->TM)),
LazyEmitLayer(CompileLayer) {
setDataLayout(this->TM->getDataLayout());
}
void addModule(std::unique_ptr<Module> M) override {
// If this module doesn't have a DataLayout attached then attach the
// default.
if (M->getDataLayout().isDefault())
M->setDataLayout(*getDataLayout());
Modules.push_back(std::move(M));
std::vector<Module *> Ms;
Ms.push_back(&*Modules.back());
LazyEmitLayer.addModuleSet(std::move(Ms), &MemMgr, &Resolver);
}
void addObjectFile(std::unique_ptr<object::ObjectFile> O) override {
std::vector<std::unique_ptr<object::ObjectFile>> Objs;
Objs.push_back(std::move(O));
ObjectLayer.addObjectSet(std::move(Objs), &MemMgr, &Resolver);
}
void addObjectFile(object::OwningBinary<object::ObjectFile> O) override {
std::unique_ptr<object::ObjectFile> Obj;
std::unique_ptr<MemoryBuffer> Buf;
std::tie(Obj, Buf) = O.takeBinary();
std::vector<std::unique_ptr<object::ObjectFile>> Objs;
Objs.push_back(std::move(Obj));
auto H =
ObjectLayer.addObjectSet(std::move(Objs), &MemMgr, &Resolver);
std::vector<std::unique_ptr<MemoryBuffer>> Bufs;
Bufs.push_back(std::move(Buf));
ObjectLayer.takeOwnershipOfBuffers(H, std::move(Bufs));
}
void addArchive(object::OwningBinary<object::Archive> A) override {
Archives.push_back(std::move(A));
}
uint64_t getSymbolAddress(StringRef Name) {
return findSymbol(Name).getAddress();
}
RuntimeDyld::SymbolInfo findSymbol(StringRef Name) {
return findMangledSymbol(Mangle(Name));
}
void finalizeObject() override {
// This is deprecated - Aim to remove in ExecutionEngine.
// REMOVE IF POSSIBLE - Doesn't make sense for New JIT.
}
void mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) override {
for (auto &P : UnfinalizedSections)
if (P.second.count(LocalAddress))
ObjectLayer.mapSectionAddress(P.first, LocalAddress, TargetAddress);
}
uint64_t getGlobalValueAddress(const std::string &Name) override {
return getSymbolAddress(Name);
}
uint64_t getFunctionAddress(const std::string &Name) override {
return getSymbolAddress(Name);
}
void *getPointerToFunction(Function *F) override {
uint64_t FAddr = getSymbolAddress(F->getName());
return reinterpret_cast<void *>(static_cast<uintptr_t>(FAddr));
}
void *getPointerToNamedFunction(StringRef Name,
bool AbortOnFailure = true) override {
uint64_t Addr = getSymbolAddress(Name);
if (!Addr && AbortOnFailure)
llvm_unreachable("Missing symbol!");
return reinterpret_cast<void *>(static_cast<uintptr_t>(Addr));
}
GenericValue runFunction(Function *F,
ArrayRef<GenericValue> ArgValues) override;
void setObjectCache(ObjectCache *NewCache) override {
CompileLayer.setObjectCache(NewCache);
}
private:
RuntimeDyld::SymbolInfo findMangledSymbol(StringRef Name) {
if (auto Sym = LazyEmitLayer.findSymbol(Name, false))
return RuntimeDyld::SymbolInfo(Sym.getAddress(), Sym.getFlags());
if (auto Sym = ClientResolver->findSymbol(Name))
return RuntimeDyld::SymbolInfo(Sym.getAddress(), Sym.getFlags());
if (auto Sym = scanArchives(Name))
return RuntimeDyld::SymbolInfo(Sym.getAddress(), Sym.getFlags());
return nullptr;
}
JITSymbol scanArchives(StringRef Name) {
for (object::OwningBinary<object::Archive> &OB : Archives) {
object::Archive *A = OB.getBinary();
// Look for our symbols in each Archive
object::Archive::child_iterator ChildIt = A->findSym(Name);
if (ChildIt != A->child_end()) {
// FIXME: Support nested archives?
ErrorOr<std::unique_ptr<object::Binary>> ChildBinOrErr =
ChildIt->getAsBinary();
if (ChildBinOrErr.getError())
continue;
std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
if (ChildBin->isObject()) {
std::vector<std::unique_ptr<object::ObjectFile>> ObjSet;
ObjSet.push_back(std::unique_ptr<object::ObjectFile>(
static_cast<object::ObjectFile *>(ChildBin.release())));
ObjectLayer.addObjectSet(std::move(ObjSet), &MemMgr, &Resolver);
if (auto Sym = ObjectLayer.findSymbol(Name, true))
return Sym;
}
}
}
return nullptr;
}
class NotifyObjectLoadedT {
public:
typedef std::vector<std::unique_ptr<object::ObjectFile>> ObjListT;
typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
LoadedObjInfoListT;
NotifyObjectLoadedT(OrcMCJITReplacement &M) : M(M) {}
void operator()(ObjectLinkingLayerBase::ObjSetHandleT H,
const ObjListT &Objects,
const LoadedObjInfoListT &Infos) const {
M.UnfinalizedSections[H] = std::move(M.SectionsAllocatedSinceLastLoad);
M.SectionsAllocatedSinceLastLoad = SectionAddrSet();
assert(Objects.size() == Infos.size() &&
"Incorrect number of Infos for Objects.");
for (unsigned I = 0; I < Objects.size(); ++I)
M.MemMgr.notifyObjectLoaded(&M, *Objects[I]);
};
private:
OrcMCJITReplacement &M;
};
class NotifyFinalizedT {
public:
NotifyFinalizedT(OrcMCJITReplacement &M) : M(M) {}
void operator()(ObjectLinkingLayerBase::ObjSetHandleT H) {
M.UnfinalizedSections.erase(H);
}
private:
OrcMCJITReplacement &M;
};
std::string Mangle(StringRef Name) {
std::string MangledName;
{
raw_string_ostream MangledNameStream(MangledName);
Mang.getNameWithPrefix(MangledNameStream, Name, *TM->getDataLayout());
}
return MangledName;
}
typedef ObjectLinkingLayer<NotifyObjectLoadedT> ObjectLayerT;
typedef IRCompileLayer<ObjectLayerT> CompileLayerT;
typedef LazyEmittingLayer<CompileLayerT> LazyEmitLayerT;
std::unique_ptr<TargetMachine> TM;
MCJITReplacementMemMgr MemMgr;
LinkingResolver Resolver;
std::shared_ptr<RuntimeDyld::SymbolResolver> ClientResolver;
Mangler Mang;
NotifyObjectLoadedT NotifyObjectLoaded;
NotifyFinalizedT NotifyFinalized;
ObjectLayerT ObjectLayer;
CompileLayerT CompileLayer;
LazyEmitLayerT LazyEmitLayer;
// We need to store ObjLayerT::ObjSetHandles for each of the object sets
// that have been emitted but not yet finalized so that we can forward the
// mapSectionAddress calls appropriately.
typedef std::set<const void *> SectionAddrSet;
struct ObjSetHandleCompare {
bool operator()(ObjectLayerT::ObjSetHandleT H1,
ObjectLayerT::ObjSetHandleT H2) const {
return &*H1 < &*H2;
}
};
SectionAddrSet SectionsAllocatedSinceLastLoad;
std::map<ObjectLayerT::ObjSetHandleT, SectionAddrSet, ObjSetHandleCompare>
UnfinalizedSections;
std::vector<object::OwningBinary<object::Archive>> Archives;
};
} // End namespace orc.
} // End namespace llvm.
#endif // LLVM_LIB_EXECUTIONENGINE_ORC_MCJITREPLACEMENT_H
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/NullResolver.cpp | //===---------- NullResolver.cpp - Reject symbol lookup requests ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/NullResolver.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
namespace orc {
RuntimeDyld::SymbolInfo NullResolver::findSymbol(const std::string &Name) {
llvm_unreachable("Unexpected cross-object symbol reference");
}
RuntimeDyld::SymbolInfo
NullResolver::findSymbolInLogicalDylib(const std::string &Name) {
llvm_unreachable("Unexpected cross-object symbol reference");
}
} // End namespace orc.
} // End namespace llvm.
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Orc/ExecutionUtils.cpp | //===---- ExecutionUtils.cpp - Utilities for executing functions in Orc ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
namespace llvm {
namespace orc {
CtorDtorIterator::CtorDtorIterator(const GlobalVariable *GV, bool End)
: InitList(
GV ? dyn_cast_or_null<ConstantArray>(GV->getInitializer()) : nullptr),
I((InitList && End) ? InitList->getNumOperands() : 0) {
}
bool CtorDtorIterator::operator==(const CtorDtorIterator &Other) const {
assert(InitList == Other.InitList && "Incomparable iterators.");
return I == Other.I;
}
bool CtorDtorIterator::operator!=(const CtorDtorIterator &Other) const {
return !(*this == Other);
}
CtorDtorIterator& CtorDtorIterator::operator++() {
++I;
return *this;
}
CtorDtorIterator CtorDtorIterator::operator++(int) {
CtorDtorIterator Temp = *this;
++I;
return Temp;
}
CtorDtorIterator::Element CtorDtorIterator::operator*() const {
ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
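  // Each entry of llvm.global_ctors/llvm.global_dtors is expected to have the
  // form { i32 priority, void ()* func, i8* data }, with func possibly hidden
  // behind casts; the loop below peels those off.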
Constant *FuncC = CS->getOperand(1);
Function *Func = nullptr;
// Extract function pointer, pulling off any casts.
while (FuncC) {
if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
Func = F;
break;
} else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
if (CE->isCast())
FuncC = dyn_cast_or_null<ConstantExpr>(CE->getOperand(0));
else
break;
} else {
// This isn't anything we recognize. Bail out with Func left set to null.
break;
}
}
ConstantInt *Priority = dyn_cast<ConstantInt>(CS->getOperand(0));
Value *Data = CS->getOperand(2);
return Element(Priority->getZExtValue(), Func, Data);
}
iterator_range<CtorDtorIterator> getConstructors(const Module &M) {
const GlobalVariable *CtorsList = M.getNamedGlobal("llvm.global_ctors");
return make_range(CtorDtorIterator(CtorsList, false),
CtorDtorIterator(CtorsList, true));
}
iterator_range<CtorDtorIterator> getDestructors(const Module &M) {
const GlobalVariable *DtorsList = M.getNamedGlobal("llvm.global_dtors");
return make_range(CtorDtorIterator(DtorsList, false),
CtorDtorIterator(DtorsList, true));
}
void LocalCXXRuntimeOverrides::runDestructors() {
auto& CXXDestructorDataPairs = DSOHandleOverride;
for (auto &P : CXXDestructorDataPairs)
P.first(P.second);
CXXDestructorDataPairs.clear();
}
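// Stand-in for __cxa_atexit: instead of registering Destructor with the
// process-wide exit machinery, record the (destructor, argument) pair on the
// list that DSOHandle points at, so runDestructors() can replay it later.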
int LocalCXXRuntimeOverrides::CXAAtExitOverride(DestructorPtr Destructor,
void *Arg, void *DSOHandle) {
auto& CXXDestructorDataPairs =
*reinterpret_cast<CXXDestructorDataPairList*>(DSOHandle);
CXXDestructorDataPairs.push_back(std::make_pair(Destructor, Arg));
return 0;
}
} // End namespace orc.
} // End namespace llvm.
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c | /*===-- jitprofiling.c - JIT (Just-In-Time) Profiling API----------*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
*===----------------------------------------------------------------------===*
*
* This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API implementation.
*
* NOTE: This file comes in a style different from the rest of LLVM
* source base since this is a piece of code shared from Intel(R)
* products. Please do not reformat / re-style this code to make
 * subsequent merges and contributions from the original source base easier.
*
*===----------------------------------------------------------------------===*/
#include "ittnotify_config.h"
#if ITT_PLATFORM==ITT_PLATFORM_WIN
#include <windows.h>
#pragma optimize("", off)
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#include <pthread.h>
#include <dlfcn.h>
#include <stdint.h>
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#include <malloc.h>
#include <stdlib.h>
#include "jitprofiling.h"
static const char rcsid[] = "\n@(#) $Revision: 243501 $\n";
#define DLL_ENVIRONMENT_VAR "VS_PROFILER"
#ifndef NEW_DLL_ENVIRONMENT_VAR
#if ITT_ARCH==ITT_ARCH_IA32
#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32"
#else
#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64"
#endif
#endif /* NEW_DLL_ENVIRONMENT_VAR */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
#define DEFAULT_DLLNAME "JitPI.dll"
HINSTANCE m_libHandle = NULL;
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#define DEFAULT_DLLNAME "libJitPI.so"
void* m_libHandle = NULL;
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
/* default location of JIT profiling agent on Android */
#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so"
/* the function pointers */
typedef unsigned int(*TPInitialize)(void);
static TPInitialize FUNC_Initialize=NULL;
typedef unsigned int(*TPNotify)(unsigned int, void*);
static TPNotify FUNC_NotifyEvent=NULL;
static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
/* end collector dll part. */
/* loadiJIT_Funcs() : this function is called once at the beginning
 * and is responsible for loading the functions from BistroJavaCollector.dll
 * result:
 * on success: the functions are loaded, iJIT_DLL_is_missing=0, return value = 1
 * on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0
 */
static int loadiJIT_Funcs(void);
/* global representing whether the BistroJavaCollector can't be loaded */
static int iJIT_DLL_is_missing = 0;
/* Virtual stack - the struct is used as a virtual stack for each thread.
 * Every thread is initialized with a stack of size INIT_TOP_Stack.
 * Every method entry decrements the current stack pointer, and every
 * method exit increments it; when the pointer unwinds past the top of
 * stack (return from the outermost function), the reported stack id is
 * marked invalid. Notice that when returning from a function the stack
 * pointer is the address of the function return.
 */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
static DWORD threadLocalStorageHandle = 0;
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#define INIT_TOP_Stack 10000
typedef struct
{
unsigned int TopStack;
unsigned int CurrentStack;
} ThreadStack, *pThreadStack;
/* end of virtual stack. */
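/* Illustrative sketch (not part of the original source): with the default
 * INIT_TOP_Stack of 10000, a thread entering and leaving three nested
 * methods reports stack ids derived from CurrentStack as follows:
 *
 *   enter A -> stack_id = 10000, CurrentStack becomes 9999
 *   enter B -> stack_id = 9999,  CurrentStack becomes 9998
 *   enter C -> stack_id = 9998,  CurrentStack becomes 9997
 *   leave C -> stack_id = ++CurrentStack + 1 = 9999
 *   leave B -> stack_id = ++CurrentStack + 1 = 10000
 *   leave A -> stack_id = ++CurrentStack + 1 = 10001 > TopStack, so the
 *              reported id is clamped to (unsigned int)-1
 */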
/*
 * The function for reporting virtual-machine related events to VTune.
 * Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill
 * in the stack_id field in the iJIT_Method_NIDS structure, as VTune fills it.
 * The return value for iJVM_EVENT_TYPE_ENTER_NIDS and
 * iJVM_EVENT_TYPE_LEAVE_NIDS events will be 0 in case of failure.
 * For the iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event
 * it will be -1 if EventSpecificData == 0, otherwise it will be 0.
 */
ITT_EXTERN_C int JITAPI
iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData)
{
int ReturnValue;
/*
* This section is for debugging outside of VTune.
     * It creates the environment variable that indicates call graph mode.
     * If running outside of VTune, remove the comment markers to enable it.
*
*
* static int firstTime = 1;
* char DoCallGraph[12] = "DoCallGraph";
* if (firstTime)
* {
* firstTime = 0;
* SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph);
* }
*
* end of section.
*/
/* initialization part - the functions have not been loaded yet. This part
* will load the functions, and check if we are in Call Graph mode.
* (for special treatment).
*/
if (!FUNC_NotifyEvent)
{
if (iJIT_DLL_is_missing)
return 0;
/* load the Function from the DLL */
if (!loadiJIT_Funcs())
return 0;
/* Call Graph initialization. */
}
/* If the event is method entry/exit, check that in the current mode
* VTune is allowed to receive it
*/
if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS ||
event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
(executionMode != iJIT_CALLGRAPH_ON))
{
return 0;
}
    /* This section is performed when a method enter event occurs.
* It updates the virtual stack, or creates it if this is the first
* method entry in the thread. The stack pointer is decreased.
*/
if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
pThreadStack threadStack =
(pThreadStack)TlsGetValue (threadLocalStorageHandle);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pThreadStack threadStack =
(pThreadStack)pthread_getspecific(threadLocalStorageHandle);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
/* check for use of reserved method IDs */
if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
return 0;
if (!threadStack)
{
/* initialize the stack. */
threadStack = (pThreadStack) calloc (sizeof(ThreadStack), 1);
threadStack->TopStack = INIT_TOP_Stack;
threadStack->CurrentStack = INIT_TOP_Stack;
#if ITT_PLATFORM==ITT_PLATFORM_WIN
TlsSetValue(threadLocalStorageHandle,(void*)threadStack);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pthread_setspecific(threadLocalStorageHandle,(void*)threadStack);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
}
/* decrease the stack. */
((piJIT_Method_NIDS) EventSpecificData)->stack_id =
(threadStack->CurrentStack)--;
}
    /* This section is performed when a method leave event occurs.
     * It updates the virtual stack by increasing the stack pointer.
     * If the stack pointer has unwound past the top of stack (left the
     * outermost function), the reported stack id is marked invalid.
     */
if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
pThreadStack threadStack =
(pThreadStack)TlsGetValue (threadLocalStorageHandle);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pThreadStack threadStack =
(pThreadStack)pthread_getspecific(threadLocalStorageHandle);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
/* check for use of reserved method IDs */
if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
return 0;
if (!threadStack)
{
/* Error: first report in this thread is method exit */
exit (1);
}
((piJIT_Method_NIDS) EventSpecificData)->stack_id =
++(threadStack->CurrentStack) + 1;
if (((piJIT_Method_NIDS) EventSpecificData)->stack_id
> threadStack->TopStack)
((piJIT_Method_NIDS) EventSpecificData)->stack_id =
(unsigned int)-1;
}
if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED)
{
/* check for use of reserved method IDs */
if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 )
return 0;
}
ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
return ReturnValue;
}
/* The new mode call back routine */
ITT_EXTERN_C void JITAPI
iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx
NewModeCallBackFuncEx)
{
/* is it already missing... or the load of functions from the DLL failed */
if (iJIT_DLL_is_missing || !loadiJIT_Funcs())
{
/* then do not bother with notifications */
NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS);
/* Error: could not load JIT functions. */
return;
}
/* nothing to do with the callback */
}
/*
* This function allows the user to query in which mode, if at all,
*VTune is running
*/
ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
{
if (!iJIT_DLL_is_missing)
{
loadiJIT_Funcs();
}
return executionMode;
}
/* this function loads the collector dll (BistroJavaCollector)
* and the relevant functions.
* on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1
* on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0
*/
static int loadiJIT_Funcs()
{
static int bDllWasLoaded = 0;
char *dllName = (char*)rcsid; /* !! Just to avoid unused code elimination */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
DWORD dNameLength = 0;
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
if(bDllWasLoaded)
{
/* dll was already loaded, no need to do it for the second time */
return 1;
}
/* Assumes that the DLL will not be found */
iJIT_DLL_is_missing = 1;
FUNC_NotifyEvent = NULL;
if (m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FreeLibrary(m_libHandle);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
dlclose(m_libHandle);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
m_libHandle = NULL;
}
/* Try to get the dll name from the environment */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0);
if (dNameLength)
{
DWORD envret = 0;
dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR,
dllName, dNameLength);
if (envret)
{
/* Try to load the dll from the PATH... */
m_libHandle = LoadLibraryExA(dllName,
NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
}
free(dllName);
} else {
/* Try to use old VS_PROFILER variable */
dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0);
if (dNameLength)
{
DWORD envret = 0;
dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR,
dllName, dNameLength);
if (envret)
{
/* Try to load the dll from the PATH... */
m_libHandle = LoadLibraryA(dllName);
}
free(dllName);
}
}
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
dllName = getenv(NEW_DLL_ENVIRONMENT_VAR);
if (!dllName)
dllName = getenv(DLL_ENVIRONMENT_VAR);
#ifdef ANDROID
if (!dllName)
dllName = ANDROID_JIT_AGENT_PATH;
#endif
if (dllName)
{
/* Try to load the dll from the PATH... */
m_libHandle = dlopen(dllName, RTLD_LAZY);
}
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
if (!m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
m_libHandle = LoadLibraryA(DEFAULT_DLLNAME);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
}
/* if the dll wasn't loaded - exit. */
if (!m_libHandle)
{
iJIT_DLL_is_missing = 1; /* don't try to initialize
* JIT agent the second time
*/
return 0;
}
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent");
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
FUNC_NotifyEvent = (TPNotify)(intptr_t)dlsym(m_libHandle, "NotifyEvent");
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
if (!FUNC_NotifyEvent)
{
FUNC_Initialize = NULL;
return 0;
}
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize");
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
FUNC_Initialize = (TPInitialize)(intptr_t)dlsym(m_libHandle, "Initialize");
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
if (!FUNC_Initialize)
{
FUNC_NotifyEvent = NULL;
return 0;
}
executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize();
bDllWasLoaded = 1;
iJIT_DLL_is_missing = 0; /* DLL is ok. */
/*
* Call Graph mode: init the thread local storage
* (need to store the virtual stack there).
*/
if ( executionMode == iJIT_CALLGRAPH_ON )
{
/* Allocate a thread local storage slot for the thread "stack" */
if (!threadLocalStorageHandle)
#if ITT_PLATFORM==ITT_PLATFORM_WIN
threadLocalStorageHandle = TlsAlloc();
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pthread_key_create(&threadLocalStorageHandle, NULL);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
}
return 1;
}
/*
* This function should be called by the user whenever a thread ends,
* to free the thread "virtual stack" storage
*/
ITT_EXTERN_C void JITAPI FinalizeThread()
{
if (threadLocalStorageHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
pThreadStack threadStack =
(pThreadStack)TlsGetValue (threadLocalStorageHandle);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pThreadStack threadStack =
(pThreadStack)pthread_getspecific(threadLocalStorageHandle);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
if (threadStack)
{
free (threadStack);
threadStack = NULL;
#if ITT_PLATFORM==ITT_PLATFORM_WIN
TlsSetValue (threadLocalStorageHandle, threadStack);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pthread_setspecific(threadLocalStorageHandle, threadStack);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
}
}
}
/*
* This function should be called by the user when the process ends,
* to free the local storage index
*/
ITT_EXTERN_C void JITAPI FinalizeProcess()
{
if (m_libHandle)
{
#if ITT_PLATFORM==ITT_PLATFORM_WIN
FreeLibrary(m_libHandle);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
dlclose(m_libHandle);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
m_libHandle = NULL;
}
if (threadLocalStorageHandle)
#if ITT_PLATFORM==ITT_PLATFORM_WIN
TlsFree (threadLocalStorageHandle);
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
pthread_key_delete(threadLocalStorageHandle);
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
}
/*
* This function should be called by the user for any method once.
* The function will return a unique method ID, the user should maintain
* the ID for each method
*/
ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
{
static unsigned int methodID = 0x100000;
if (methodID == 0)
return 0; /* ERROR : this is not a valid value */
return methodID++;
}
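/* Illustrative usage sketch (not part of the original source): a JIT that
 * wants to register a freshly emitted method would, under these APIs, do
 * roughly the following. The names codeAddr, codeSize and "exampleMethod"
 * are hypothetical placeholders; the block is kept under #if 0 so the file
 * still compiles unchanged.
 */
#if 0
static void exampleRegisterMethod(void *codeAddr, unsigned int codeSize)
{
    iJIT_Method_Load ml;
    memset(&ml, 0, sizeof(ml)); /* requires <string.h> */
    ml.method_id = iJIT_GetNewMethodID(); /* IDs 0-999 are reserved */
    ml.method_name = "exampleMethod";
    ml.method_load_address = codeAddr;
    ml.method_size = codeSize;
    if (iJIT_IsProfilingActive() != iJIT_NOTHING_RUNNING)
        iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&ml);
}
#endif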
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp | //===-- IntelJITEventListener.cpp - Tell Intel profiler about JITed code --===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a JITEventListener object to tell Intel(R) VTune(TM)
// Amplifier XE 2011 about JITted functions.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#include "IntelJITEventsWrapper.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolSize.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::object;
#define DEBUG_TYPE "amplifier-jit-event-listener"
namespace {
class IntelJITEventListener : public JITEventListener {
typedef DenseMap<void*, unsigned int> MethodIDMap;
std::unique_ptr<IntelJITEventsWrapper> Wrapper;
MethodIDMap MethodIDs;
typedef SmallVector<const void *, 64> MethodAddressVector;
typedef DenseMap<const void *, MethodAddressVector> ObjectMap;
ObjectMap LoadedObjectMap;
std::map<const char*, OwningBinary<ObjectFile>> DebugObjects;
public:
IntelJITEventListener(IntelJITEventsWrapper* libraryWrapper) {
Wrapper.reset(libraryWrapper);
}
~IntelJITEventListener() {
}
void NotifyObjectEmitted(const ObjectFile &Obj,
const RuntimeDyld::LoadedObjectInfo &L) override;
void NotifyFreeingObject(const ObjectFile &Obj) override;
};
static LineNumberInfo DILineInfoToIntelJITFormat(uintptr_t StartAddress,
uintptr_t Address,
DILineInfo Line) {
LineNumberInfo Result;
Result.Offset = Address - StartAddress;
Result.LineNumber = Line.Line;
return Result;
}
static iJIT_Method_Load FunctionDescToIntelJITFormat(
IntelJITEventsWrapper& Wrapper,
const char* FnName,
uintptr_t FnStart,
size_t FnSize) {
iJIT_Method_Load Result;
memset(&Result, 0, sizeof(iJIT_Method_Load));
Result.method_id = Wrapper.iJIT_GetNewMethodID();
Result.method_name = const_cast<char*>(FnName);
Result.method_load_address = reinterpret_cast<void*>(FnStart);
Result.method_size = FnSize;
Result.class_id = 0;
Result.class_file_name = NULL;
Result.user_data = NULL;
Result.user_data_size = 0;
Result.env = iJDE_JittingAPI;
return Result;
}
void IntelJITEventListener::NotifyObjectEmitted(
const ObjectFile &Obj,
const RuntimeDyld::LoadedObjectInfo &L) {
OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
// Get the address of the object image for use as a unique identifier
const void* ObjData = DebugObj.getData().data();
DIContext* Context = new DWARFContextInMemory(DebugObj);
MethodAddressVector Functions;
// Use symbol info to iterate functions in the object.
for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
SymbolRef Sym = P.first;
std::vector<LineNumberInfo> LineInfo;
std::string SourceFileName;
if (Sym.getType() != SymbolRef::ST_Function)
continue;
ErrorOr<StringRef> Name = Sym.getName();
if (!Name)
continue;
ErrorOr<uint64_t> AddrOrErr = Sym.getAddress();
if (AddrOrErr.getError())
continue;
uint64_t Addr = *AddrOrErr;
uint64_t Size = P.second;
// Record this address in a local vector
Functions.push_back((void*)Addr);
// Build the function loaded notification message
iJIT_Method_Load FunctionMessage =
FunctionDescToIntelJITFormat(*Wrapper, Name->data(), Addr, Size);
DILineInfoTable Lines = Context->getLineInfoForAddressRange(Addr, Size);
DILineInfoTable::iterator Begin = Lines.begin();
DILineInfoTable::iterator End = Lines.end();
for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
LineInfo.push_back(
DILineInfoToIntelJITFormat((uintptr_t)Addr, It->first, It->second));
}
if (LineInfo.size() == 0) {
FunctionMessage.source_file_name = 0;
FunctionMessage.line_number_size = 0;
FunctionMessage.line_number_table = 0;
} else {
// Source line information for the address range is provided as
// a code offset for the start of the corresponding sub-range and
// a source line. JIT API treats offsets in LineNumberInfo structures
// as the end of the corresponding code region. The start of the code
// is taken from the previous element. Need to shift the elements.
LineNumberInfo last = LineInfo.back();
last.Offset = FunctionMessage.method_size;
LineInfo.push_back(last);
for (size_t i = LineInfo.size() - 2; i > 0; --i)
LineInfo[i].LineNumber = LineInfo[i - 1].LineNumber;
SourceFileName = Lines.front().second.FileName;
FunctionMessage.source_file_name =
const_cast<char *>(SourceFileName.c_str());
FunctionMessage.line_number_size = LineInfo.size();
FunctionMessage.line_number_table = &*LineInfo.begin();
}
Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
&FunctionMessage);
MethodIDs[(void*)Addr] = FunctionMessage.method_id;
}
// To support object unload notification, we need to keep a list of
// registered function addresses for each loaded object. We will
// use the MethodIDs map to get the registered ID for each function.
LoadedObjectMap[ObjData] = Functions;
DebugObjects[Obj.getData().data()] = std::move(DebugObjOwner);
}
void IntelJITEventListener::NotifyFreeingObject(const ObjectFile &Obj) {
// This object may not have been registered with the listener. If it wasn't,
// bail out.
if (DebugObjects.find(Obj.getData().data()) == DebugObjects.end())
return;
// Get the address of the object image for use as a unique identifier
const ObjectFile &DebugObj = *DebugObjects[Obj.getData().data()].getBinary();
const void* ObjData = DebugObj.getData().data();
// Get the object's function list from LoadedObjectMap
ObjectMap::iterator OI = LoadedObjectMap.find(ObjData);
if (OI == LoadedObjectMap.end())
return;
MethodAddressVector& Functions = OI->second;
// Walk the function list, unregistering each function
for (MethodAddressVector::iterator FI = Functions.begin(),
FE = Functions.end();
FI != FE;
++FI) {
void* FnStart = const_cast<void*>(*FI);
MethodIDMap::iterator MI = MethodIDs.find(FnStart);
if (MI != MethodIDs.end()) {
Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
&MI->second);
MethodIDs.erase(MI);
}
}
// Erase the object from LoadedObjectMap
LoadedObjectMap.erase(OI);
DebugObjects.erase(Obj.getData().data());
}
} // anonymous namespace.
namespace llvm {
JITEventListener *JITEventListener::createIntelJITEventListener() {
return new IntelJITEventListener(new IntelJITEventsWrapper);
}
// for testing
JITEventListener *JITEventListener::createIntelJITEventListener(
IntelJITEventsWrapper* TestImpl) {
return new IntelJITEventListener(TestImpl);
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h | /*===-- jitprofiling.h - JIT Profiling API-------------------------*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
*===----------------------------------------------------------------------===*
*
* This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API declaration.
*
* NOTE: This file comes in a style different from the rest of LLVM
* source base since this is a piece of code shared from Intel(R)
* products. Please do not reformat / re-style this code to make
 * subsequent merges and contributions from the original source base easier.
*
*===----------------------------------------------------------------------===*/
#ifndef __JITPROFILING_H__
#define __JITPROFILING_H__
/*
* Various constants used by functions
*/
/* event notification */
typedef enum iJIT_jvm_event
{
/* shutdown */
/*
* Program exiting EventSpecificData NA
*/
iJVM_EVENT_TYPE_SHUTDOWN = 2,
/* JIT profiling */
/*
 * issued after method code is jitted into memory but before the code is
 * executed. EventSpecificData is an iJIT_Method_Load
*/
iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
/* issued before unload. Method code will no longer be executed, but code
* and info are still in memory. The VTune profiler may capture method
 * code only at this point. EventSpecificData is iJIT_Method_Id
*/
iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
/* Method Profiling */
    /* method name, Id and stack are supplied
     * issued when a method is about to be entered. EventSpecificData is
     * iJIT_Method_NIDS
*/
iJVM_EVENT_TYPE_ENTER_NIDS = 19,
    /* method name, Id and stack are supplied
     * issued when a method is about to be left. EventSpecificData is
     * iJIT_Method_NIDS
*/
iJVM_EVENT_TYPE_LEAVE_NIDS
} iJIT_JVM_EVENT;
typedef enum _iJIT_ModeFlags
{
/* No need to Notify VTune, since VTune is not running */
iJIT_NO_NOTIFICATIONS = 0x0000,
/* when turned on the jit must call
* iJIT_NotifyEvent
* (
* iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
* )
     * for all the methods already jitted
*/
iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
/* when turned on the jit must call
* iJIT_NotifyEvent
* (
* iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
     *  ) for all the methods that are unloaded
*/
iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
/* when turned on the jit must instrument all
     * the currently jitted code with calls on
* method entries
*/
iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
/* when turned on the jit must instrument all
     * the currently jitted code with calls
* on method exit
*/
iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
} iJIT_ModeFlags;
/* Flags used by iJIT_IsProfilingActive() */
typedef enum _iJIT_IsProfilingActiveFlags
{
/* No profiler is running. Currently not used */
iJIT_NOTHING_RUNNING = 0x0000,
/* Sampling is running. This is the default value
* returned by iJIT_IsProfilingActive()
*/
iJIT_SAMPLING_ON = 0x0001,
/* Call Graph is running */
iJIT_CALLGRAPH_ON = 0x0002
} iJIT_IsProfilingActiveFlags;
/* Enumerator for the environment of methods */
typedef enum _iJDEnvironmentType
{
iJDE_JittingAPI = 2
} iJDEnvironmentType;
/**********************************
* Data structures for the events *
**********************************/
/* structure for the events:
* iJVM_EVENT_TYPE_METHOD_UNLOAD_START
*/
typedef struct _iJIT_Method_Id
{
/* Id of the method (same as the one passed in
* the iJIT_Method_Load struct
*/
unsigned int method_id;
} *piJIT_Method_Id, iJIT_Method_Id;
/* structure for the events:
* iJVM_EVENT_TYPE_ENTER_NIDS,
* iJVM_EVENT_TYPE_LEAVE_NIDS,
* iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS
*/
typedef struct _iJIT_Method_NIDS
{
/* unique method ID */
unsigned int method_id;
/* NOTE: no need to fill this field, it's filled by VTune */
unsigned int stack_id;
/* method name (just the method, without the class) */
char* method_name;
} *piJIT_Method_NIDS, iJIT_Method_NIDS;
/* structures for the events:
* iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
*/
typedef struct _LineNumberInfo
{
    /* x86 Offset from the beginning of the method */
unsigned int Offset;
/* source line number from the beginning of the source file */
unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
typedef struct _iJIT_Method_Load
{
/* unique method ID - can be any unique value, (except 0 - 999) */
unsigned int method_id;
/* method name (can be with or without the class and signature, in any case
* the class name will be added to it)
*/
char* method_name;
/* virtual address of that method - This determines the method range for the
* iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
*/
void* method_load_address;
/* Size in memory - Must be exact */
unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
unsigned int line_number_size;
/* Pointer to the beginning of the line numbers info array */
pLineNumberInfo line_number_table;
/* unique class ID */
unsigned int class_id;
/* class file name */
char* class_file_name;
/* source file name */
char* source_file_name;
/* bits supplied by the user for saving in the JIT file */
void* user_data;
/* the size of the user data buffer */
unsigned int user_data_size;
/* NOTE: no need to fill this field, it's filled by VTune */
iJDEnvironmentType env;
} *piJIT_Method_Load, iJIT_Method_Load;
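/* Illustrative sketch (not part of the original source): the profiler
 * treats each LineNumberInfo.Offset as the END of the code range for its
 * LineNumber, with the start taken from the previous entry. A hypothetical
 * two-entry table for a method of size 0x40 could look like:
 *
 *   LineNumberInfo table[2] = { { 0x20, 10 }, { 0x40, 11 } };
 *   // bytes [0x00,0x20) map to source line 10, [0x20,0x40) to line 11
 *   // ml.line_number_size  = 2;
 *   // ml.line_number_table = table;
 */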
/* API Functions */
#ifdef __cplusplus
extern "C" {
#endif
#ifndef CDECL
# if defined WIN32 || defined _WIN32
# define CDECL __cdecl
# else /* defined WIN32 || defined _WIN32 */
# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
# define CDECL /* not actual on x86_64 platform */
# else /* _M_X64 || _M_AMD64 || __x86_64__ */
# define CDECL __attribute__ ((cdecl))
# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
# endif /* defined WIN32 || defined _WIN32 */
#endif /* CDECL */
// HLSL Change: changed calling convention to __stdcall
#define JITAPI __stdcall
/* called when the settings are changed with new settings */
typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
/* The new mode call back routine */
void JITAPI iJIT_RegisterCallbackEx(void *userdata,
iJIT_ModeChangedEx NewModeCallBackFuncEx);
iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
void JITAPI FinalizeThread(void);
void JITAPI FinalizeProcess(void);
unsigned int JITAPI iJIT_GetNewMethodID(void);
#ifdef __cplusplus
}
#endif
#endif /* __JITPROFILING_H__ */
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h | /*===-- ittnotify_config.h - JIT Profiling API internal config-----*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
*===----------------------------------------------------------------------===*
*
* This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API internal config.
*
* NOTE: This file comes in a style different from the rest of LLVM
* source base since this is a piece of code shared from Intel(R)
* products. Please do not reformat / re-style this code to make
 * subsequent merges and contributions from the original source base easier.
*
*===----------------------------------------------------------------------===*/
#ifndef _ITTNOTIFY_CONFIG_H_
#define _ITTNOTIFY_CONFIG_H_
/** @cond exclude_from_documentation */
#ifndef ITT_OS_WIN
# define ITT_OS_WIN 1
#endif /* ITT_OS_WIN */
#ifndef ITT_OS_LINUX
# define ITT_OS_LINUX 2
#endif /* ITT_OS_LINUX */
#ifndef ITT_OS_MAC
# define ITT_OS_MAC 3
#endif /* ITT_OS_MAC */
#ifndef ITT_OS
# if defined WIN32 || defined _WIN32
# define ITT_OS ITT_OS_WIN
# elif defined( __APPLE__ ) && defined( __MACH__ )
# define ITT_OS ITT_OS_MAC
# else
# define ITT_OS ITT_OS_LINUX
# endif
#endif /* ITT_OS */
#ifndef ITT_PLATFORM_WIN
# define ITT_PLATFORM_WIN 1
#endif /* ITT_PLATFORM_WIN */
#ifndef ITT_PLATFORM_POSIX
# define ITT_PLATFORM_POSIX 2
#endif /* ITT_PLATFORM_POSIX */
#ifndef ITT_PLATFORM
# if ITT_OS==ITT_OS_WIN
# define ITT_PLATFORM ITT_PLATFORM_WIN
# else
# define ITT_PLATFORM ITT_PLATFORM_POSIX
# endif /* _WIN32 */
#endif /* ITT_PLATFORM */
#if defined(_UNICODE) && !defined(UNICODE)
#define UNICODE
#endif
#include <stddef.h>
#if ITT_PLATFORM==ITT_PLATFORM_WIN
#include <tchar.h>
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#include <stdint.h>
#if defined(UNICODE) || defined(_UNICODE)
#include <wchar.h>
#endif /* UNICODE || _UNICODE */
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#ifndef CDECL
# if ITT_PLATFORM==ITT_PLATFORM_WIN
# define CDECL __cdecl
# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
# define CDECL /* not actual on x86_64 platform */
# else /* _M_X64 || _M_AMD64 || __x86_64__ */
# define CDECL __attribute__ ((cdecl))
# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#endif /* CDECL */
#ifndef STDCALL
# if ITT_PLATFORM==ITT_PLATFORM_WIN
# define STDCALL __stdcall
# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
# define STDCALL /* not supported on x86_64 platform */
# else /* _M_X64 || _M_AMD64 || __x86_64__ */
# define STDCALL __attribute__ ((stdcall))
# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#endif /* STDCALL */
#define ITTAPI CDECL
#define LIBITTAPI CDECL
/* TODO: Temporary for compatibility! */
#define ITTAPI_CALL CDECL
#define LIBITTAPI_CALL CDECL
#if ITT_PLATFORM==ITT_PLATFORM_WIN
/* use __forceinline (VC++ specific) */
#define ITT_INLINE __forceinline
#define ITT_INLINE_ATTRIBUTE /* nothing */
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
/*
* Generally, functions are not inlined unless optimization is specified.
* For functions declared inline, this attribute inlines the function even
* if no optimization level was specified.
*/
#ifdef __STRICT_ANSI__
#define ITT_INLINE static
#else /* __STRICT_ANSI__ */
#define ITT_INLINE static inline
#endif /* __STRICT_ANSI__ */
#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline))
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
/** @endcond */
#ifndef ITT_ARCH_IA32
# define ITT_ARCH_IA32 1
#endif /* ITT_ARCH_IA32 */
#ifndef ITT_ARCH_IA32E
# define ITT_ARCH_IA32E 2
#endif /* ITT_ARCH_IA32E */
#ifndef ITT_ARCH_IA64
# define ITT_ARCH_IA64 3
#endif /* ITT_ARCH_IA64 */
#ifndef ITT_ARCH
# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
# define ITT_ARCH ITT_ARCH_IA32E
# elif defined _M_IA64 || defined __ia64
# define ITT_ARCH ITT_ARCH_IA64
# else
# define ITT_ARCH ITT_ARCH_IA32
# endif
#endif
#ifdef __cplusplus
# define ITT_EXTERN_C extern "C"
#else
# define ITT_EXTERN_C /* nothing */
#endif /* __cplusplus */
#define ITT_TO_STR_AUX(x) #x
#define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
#define __ITT_BUILD_ASSERT(expr, suffix) do { \
static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \
__itt_build_check_##suffix[0] = 0; \
} while(0)
#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
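/* Illustrative sketch (not part of the original source): used inside a
 * function body, ITT_BUILD_ASSERT fails at compile time when the
 * expression is false, because the array size then evaluates to -1:
 *
 *   ITT_BUILD_ASSERT(sizeof(void*) >= 4);  compiles
 *   ITT_BUILD_ASSERT(sizeof(char) == 2);   error: negative array size
 */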
#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
/* Replace with snapshot date YYYYMMDD for promotion build. */
#define API_VERSION_BUILD 20111111
#ifndef API_VERSION_NUM
#define API_VERSION_NUM 0.0.0
#endif /* API_VERSION_NUM */
#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \
" (" ITT_TO_STR(API_VERSION_BUILD) ")"
/* OS communication functions */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
#include <windows.h>
typedef HMODULE lib_t;
typedef DWORD TIDT;
typedef CRITICAL_SECTION mutex_t;
#define MUTEX_INITIALIZER { 0 }
#define strong_alias(name, aliasname) /* empty for Windows */
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#include <dlfcn.h>
#if defined(UNICODE) || defined(_UNICODE)
#include <wchar.h>
#endif /* UNICODE */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */
#endif /* _GNU_SOURCE */
#include <pthread.h>
typedef void* lib_t;
typedef pthread_t TIDT;
typedef pthread_mutex_t mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#define _strong_alias(name, aliasname) \
extern __typeof (name) aliasname __attribute__ ((alias (#name)));
#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
#define __itt_load_lib(name) LoadLibraryA(name)
#define __itt_unload_lib(handle) FreeLibrary(handle)
#define __itt_system_error() (int)GetLastError()
#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
#define __itt_fstrlen(s) lstrlenA(s)
#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
#define __itt_fstrdup(s) _strdup(s)
#define __itt_thread_id() GetCurrentThreadId()
#define __itt_thread_yield() SwitchToThread()
#ifndef ITT_SIMPLE_INIT
ITT_INLINE long
__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
{
return InterlockedIncrement(ptr);
}
#endif /* ITT_SIMPLE_INIT */
#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
#define __itt_get_proc(lib, name) dlsym(lib, name)
#define __itt_mutex_init(mutex) {\
pthread_mutexattr_t mutex_attr; \
int error_code = pthread_mutexattr_init(&mutex_attr); \
if (error_code) \
__itt_report_error(__itt_error_system, "pthread_mutexattr_init", \
error_code); \
error_code = pthread_mutexattr_settype(&mutex_attr, \
PTHREAD_MUTEX_RECURSIVE); \
if (error_code) \
__itt_report_error(__itt_error_system, "pthread_mutexattr_settype", \
error_code); \
error_code = pthread_mutex_init(mutex, &mutex_attr); \
if (error_code) \
__itt_report_error(__itt_error_system, "pthread_mutex_init", \
error_code); \
error_code = pthread_mutexattr_destroy(&mutex_attr); \
if (error_code) \
__itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", \
error_code); \
}
#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
#define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
#define __itt_unload_lib(handle) dlclose(handle)
#define __itt_system_error() errno
#define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
#define __itt_fstrlen(s) strlen(s)
#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
#define __itt_fstrdup(s) strdup(s)
#define __itt_thread_id() pthread_self()
#define __itt_thread_yield() sched_yield()
#if ITT_ARCH==ITT_ARCH_IA64
#ifdef __INTEL_COMPILER
#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
#else /* __INTEL_COMPILER */
/* TODO: Add Support for not Intel compilers for IA64 */
#endif /* __INTEL_COMPILER */
#else /* ITT_ARCH!=ITT_ARCH_IA64 */
ITT_INLINE long
__TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
{
long result;
__asm__ __volatile__("lock\nxadd %0,%1"
: "=r"(result),"=m"(*(long*)ptr)
: "0"(addend), "m"(*(long*)ptr)
: "memory");
return result;
}
#endif /* ITT_ARCH==ITT_ARCH_IA64 */
#ifndef ITT_SIMPLE_INIT
ITT_INLINE long
__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
{
return __TBB_machine_fetchadd4(ptr, 1) + 1L;
}
#endif /* ITT_SIMPLE_INIT */
#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
typedef enum {
__itt_collection_normal = 0,
__itt_collection_paused = 1
} __itt_collection_state;
typedef enum {
__itt_thread_normal = 0,
__itt_thread_ignored = 1
} __itt_thread_state;
#pragma pack(push, 8)
typedef struct ___itt_thread_info
{
const char* nameA; /*!< Copy of original name in ASCII. */
#if defined(UNICODE) || defined(_UNICODE)
const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
#else /* UNICODE || _UNICODE */
void* nameW;
#endif /* UNICODE || _UNICODE */
TIDT tid;
__itt_thread_state state; /*!< Thread state (paused or normal) */
int extra1; /*!< Reserved to the runtime */
void* extra2; /*!< Reserved to the runtime */
struct ___itt_thread_info* next;
} __itt_thread_info;
#include "ittnotify_types.h" /* For __itt_group_id definition */
typedef struct ___itt_api_info_20101001
{
const char* name;
void** func_ptr;
void* init_func;
__itt_group_id group;
} __itt_api_info_20101001;
typedef struct ___itt_api_info
{
const char* name;
void** func_ptr;
void* init_func;
void* null_func;
__itt_group_id group;
} __itt_api_info;
struct ___itt_domain;
struct ___itt_string_handle;
typedef struct ___itt_global
{
unsigned char magic[8];
unsigned long version_major;
unsigned long version_minor;
unsigned long version_build;
volatile long api_initialized;
volatile long mutex_initialized;
volatile long atomic_counter;
mutex_t mutex;
lib_t lib;
void* error_handler;
const char** dll_path_ptr;
__itt_api_info* api_list_ptr;
struct ___itt_global* next;
/* Joinable structures below */
__itt_thread_info* thread_list;
struct ___itt_domain* domain_list;
struct ___itt_string_handle* string_list;
__itt_collection_state state;
} __itt_global;
#pragma pack(pop)
#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
if (h != NULL) { \
h->tid = t; \
h->nameA = NULL; \
h->nameW = n ? _wcsdup(n) : NULL; \
h->state = s; \
h->extra1 = 0; /* reserved */ \
h->extra2 = NULL; /* reserved */ \
h->next = NULL; \
if (h_tail == NULL) \
(gptr)->thread_list = h; \
else \
h_tail->next = h; \
} \
}
#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
if (h != NULL) { \
h->tid = t; \
h->nameA = n ? __itt_fstrdup(n) : NULL; \
h->nameW = NULL; \
h->state = s; \
h->extra1 = 0; /* reserved */ \
h->extra2 = NULL; /* reserved */ \
h->next = NULL; \
if (h_tail == NULL) \
(gptr)->thread_list = h; \
else \
h_tail->next = h; \
} \
}
#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
if (h != NULL) { \
h->flags = 0; /* domain is disabled by default */ \
h->nameA = NULL; \
h->nameW = name ? _wcsdup(name) : NULL; \
h->extra1 = 0; /* reserved */ \
h->extra2 = NULL; /* reserved */ \
h->next = NULL; \
if (h_tail == NULL) \
(gptr)->domain_list = h; \
else \
h_tail->next = h; \
} \
}
#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
if (h != NULL) { \
h->flags = 0; /* domain is disabled by default */ \
h->nameA = name ? __itt_fstrdup(name) : NULL; \
h->nameW = NULL; \
h->extra1 = 0; /* reserved */ \
h->extra2 = NULL; /* reserved */ \
h->next = NULL; \
if (h_tail == NULL) \
(gptr)->domain_list = h; \
else \
h_tail->next = h; \
} \
}
#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
if (h != NULL) { \
h->strA = NULL; \
h->strW = name ? _wcsdup(name) : NULL; \
h->extra1 = 0; /* reserved */ \
h->extra2 = NULL; /* reserved */ \
h->next = NULL; \
if (h_tail == NULL) \
(gptr)->string_list = h; \
else \
h_tail->next = h; \
} \
}
#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
if (h != NULL) { \
h->strA = name ? __itt_fstrdup(name) : NULL; \
h->strW = NULL; \
h->extra1 = 0; /* reserved */ \
h->extra2 = NULL; /* reserved */ \
h->next = NULL; \
if (h_tail == NULL) \
(gptr)->string_list = h; \
else \
h_tail->next = h; \
} \
}
#endif /* _ITTNOTIFY_CONFIG_H_ */
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/CMakeLists.txt | include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. )
add_llvm_library(LLVMIntelJITEvents
IntelJITEventListener.cpp
jitprofiling.c
LINK_LIBS pthread ${CMAKE_DL_LIBS}
)
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h | /*===-- ittnotify_types.h - JIT Profiling API internal types--------*- C -*-===*
*
* The LLVM Compiler Infrastructure
*
* This file is distributed under the University of Illinois Open Source
* License. See LICENSE.TXT for details.
*
*===----------------------------------------------------------------------===*
*
* NOTE: This file comes in a style different from the rest of LLVM
* source base since this is a piece of code shared from Intel(R)
* products. Please do not reformat / re-style this code to make
 * subsequent merges and contributions from the original source base easier.
*
*===----------------------------------------------------------------------===*/
#ifndef _ITTNOTIFY_TYPES_H_
#define _ITTNOTIFY_TYPES_H_
typedef enum ___itt_group_id
{
__itt_group_none = 0,
__itt_group_legacy = 1<<0,
__itt_group_control = 1<<1,
__itt_group_thread = 1<<2,
__itt_group_mark = 1<<3,
__itt_group_sync = 1<<4,
__itt_group_fsync = 1<<5,
__itt_group_jit = 1<<6,
__itt_group_model = 1<<7,
__itt_group_splitter_min = 1<<7,
__itt_group_counter = 1<<8,
__itt_group_frame = 1<<9,
__itt_group_stitch = 1<<10,
__itt_group_heap = 1<<11,
__itt_group_splitter_max = 1<<12,
__itt_group_structure = 1<<12,
__itt_group_suppress = 1<<13,
__itt_group_all = -1
} __itt_group_id;
#pragma pack(push, 8)
typedef struct ___itt_group_list
{
__itt_group_id id;
const char* name;
} __itt_group_list;
#pragma pack(pop)
#define ITT_GROUP_LIST(varname) \
static __itt_group_list varname[] = { \
{ __itt_group_all, "all" }, \
{ __itt_group_control, "control" }, \
{ __itt_group_thread, "thread" }, \
{ __itt_group_mark, "mark" }, \
{ __itt_group_sync, "sync" }, \
{ __itt_group_fsync, "fsync" }, \
{ __itt_group_jit, "jit" }, \
{ __itt_group_model, "model" }, \
{ __itt_group_counter, "counter" }, \
{ __itt_group_frame, "frame" }, \
{ __itt_group_stitch, "stitch" }, \
{ __itt_group_heap, "heap" }, \
{ __itt_group_structure, "structure" }, \
{ __itt_group_suppress, "suppress" }, \
{ __itt_group_none, NULL } \
}
#endif /* _ITTNOTIFY_TYPES_H_ */
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/LLVMBuild.txt | ;===- ./lib/ExecutionEngine/JITProfileAmplifier/LLVMBuild.txt --*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[common]
[component_0]
type = OptionalLibrary
name = IntelJITEvents
parent = ExecutionEngine
required_libraries = Core DebugInfoDWARF Support Object ExecutionEngine
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h | //===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper for the Intel JIT Events API. It allows for the
// implementation of the jitprofiling library to be swapped with an alternative
// implementation (for testing). To include this file, you must have the
// jitprofiling.h header available; it is available in Intel(R) VTune(TM)
// Amplifier XE 2011.
//
//===----------------------------------------------------------------------===//
#ifndef INTEL_JIT_EVENTS_WRAPPER_H
#define INTEL_JIT_EVENTS_WRAPPER_H
#include "jitprofiling.h"
namespace llvm {
class IntelJITEventsWrapper {
// Function pointer types for testing implementation of Intel jitprofiling
// library
typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*);
typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx );
typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void);
typedef void (*FinalizeThreadPtr)(void);
typedef void (*FinalizeProcessPtr)(void);
typedef unsigned int (*GetNewMethodIDPtr)(void);
NotifyEventPtr NotifyEventFunc;
RegisterCallbackExPtr RegisterCallbackExFunc;
IsProfilingActivePtr IsProfilingActiveFunc;
GetNewMethodIDPtr GetNewMethodIDFunc;
public:
bool isAmplifierRunning() {
return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON;
}
IntelJITEventsWrapper()
: NotifyEventFunc(::iJIT_NotifyEvent),
RegisterCallbackExFunc(::iJIT_RegisterCallbackEx),
IsProfilingActiveFunc(::iJIT_IsProfilingActive),
GetNewMethodIDFunc(::iJIT_GetNewMethodID) {
}
IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl,
RegisterCallbackExPtr RegisterCallbackExImpl,
IsProfilingActivePtr IsProfilingActiveImpl,
FinalizeThreadPtr FinalizeThreadImpl,
FinalizeProcessPtr FinalizeProcessImpl,
GetNewMethodIDPtr GetNewMethodIDImpl)
: NotifyEventFunc(NotifyEventImpl),
RegisterCallbackExFunc(RegisterCallbackExImpl),
IsProfilingActiveFunc(IsProfilingActiveImpl),
GetNewMethodIDFunc(GetNewMethodIDImpl) {
}
// Sends an event announcing that a function has been emitted
// return values are event-specific. See Intel documentation for details.
int iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) {
if (!NotifyEventFunc)
return -1;
return NotifyEventFunc(EventType, EventSpecificData);
}
// Registers a callback function to receive notice of profiling state changes
void iJIT_RegisterCallbackEx(void *UserData,
iJIT_ModeChangedEx NewModeCallBackFuncEx) {
if (RegisterCallbackExFunc)
RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx);
}
// Returns the current profiler mode
iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) {
if (!IsProfilingActiveFunc)
return iJIT_NOTHING_RUNNING;
return IsProfilingActiveFunc();
}
// Generates a locally unique method ID for use in code registration
unsigned int iJIT_GetNewMethodID(void) {
if (!GetNewMethodIDFunc)
return -1;
return GetNewMethodIDFunc();
}
};
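// Illustrative sketch (not part of the original source): a unit test can
// stub out the C API by constructing the wrapper with its own function
// pointers and handing it to createIntelJITEventListener; StubNotifyEvent
// below is a hypothetical name.
//
//   static int StubNotifyEvent(iJIT_JVM_EVENT, void *) { return 0; }
//   IntelJITEventsWrapper *W = new IntelJITEventsWrapper(
//       StubNotifyEvent, nullptr, nullptr, nullptr, nullptr,
//       ::iJIT_GetNewMethodID);
//   JITEventListener *L = JITEventListener::createIntelJITEventListener(W);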
} //namespace llvm
#endif //INTEL_JIT_EVENTS_WRAPPER_H
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | //===-- ExternalFunctions.cpp - Implement External Functions --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains code to deal with invoking "external" functions, and it
// also contains code that implements "exported" external functions.
//
// There are currently two mechanisms for handling external functions in the
// Interpreter. The first is to implement lle_* wrapper functions that are
// specific to well-known library functions which manually translate the
// arguments from GenericValues and make the call. If such a wrapper does
// not exist, and libffi is available, then the Interpreter will attempt to
// invoke the function using libffi, after finding its address.
//
//===----------------------------------------------------------------------===//
#include "Interpreter.h"
#include "llvm/Config/config.h" // Detect libffi
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/UniqueLock.h"
#include <cmath>
#include <csignal>
#include <cstdio>
#include <cstring>
#include <map>
#ifdef HAVE_FFI_CALL
#ifdef HAVE_FFI_H
#include <ffi.h>
#define USE_LIBFFI
#elif HAVE_FFI_FFI_H
#include <ffi/ffi.h>
#define USE_LIBFFI
#endif
#endif
using namespace llvm;
static ManagedStatic<sys::Mutex> FunctionsLock;
typedef GenericValue (*ExFunc)(FunctionType *, ArrayRef<GenericValue>);
static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
static ManagedStatic<std::map<std::string, ExFunc> > FuncNames;
#ifdef USE_LIBFFI
typedef void (*RawFunc)();
static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
#endif
static Interpreter *TheInterpreter;
static char getTypeID(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::VoidTyID: return 'V';
case Type::IntegerTyID:
switch (cast<IntegerType>(Ty)->getBitWidth()) {
case 1: return 'o';
case 8: return 'B';
case 16: return 'S';
case 32: return 'I';
case 64: return 'L';
default: return 'N';
}
case Type::FloatTyID: return 'F';
case Type::DoubleTyID: return 'D';
case Type::PointerTyID: return 'P';
case Type::FunctionTyID:return 'M';
case Type::StructTyID: return 'T';
case Type::ArrayTyID: return 'A';
default: return 'U';
}
}
// Try to find the address of an external function given a Function object.
// Please note that the interpreter doesn't know how to assemble a real call
// in the general case (that is the JIT's job), which is why it assumes that
// all external functions have the same (and pretty "general") signature.
// The typical examples of such functions are the "lle_X_" ones.
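// Illustrative example (derived from getTypeID above, not part of the
// original source): for a function declared as "i32 @putchar(i32)" the
// composite name tried first would be "lle_II_putchar", with a fallback
// lookup of "lle_X_putchar".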
static ExFunc lookupFunction(const Function *F) {
// Function not found, look it up... start by figuring out what the
// composite function name should be.
std::string ExtName = "lle_";
FunctionType *FT = F->getFunctionType();
for (unsigned i = 0, e = FT->getNumContainedTypes(); i != e; ++i)
ExtName += getTypeID(FT->getContainedType(i));
ExtName += ("_" + F->getName()).str();
sys::ScopedLock Writer(*FunctionsLock);
ExFunc FnPtr = (*FuncNames)[ExtName];
if (!FnPtr)
FnPtr = (*FuncNames)[("lle_X_" + F->getName()).str()];
if (!FnPtr) // Try calling a generic function... if it exists...
FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
("lle_X_" + F->getName()).str());
if (FnPtr)
ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
return FnPtr;
}
#ifdef USE_LIBFFI
static ffi_type *ffiTypeFor(Type *Ty) {
switch (Ty->getTypeID()) {
case Type::VoidTyID: return &ffi_type_void;
case Type::IntegerTyID:
switch (cast<IntegerType>(Ty)->getBitWidth()) {
case 8: return &ffi_type_sint8;
case 16: return &ffi_type_sint16;
case 32: return &ffi_type_sint32;
case 64: return &ffi_type_sint64;
}
case Type::FloatTyID: return &ffi_type_float;
case Type::DoubleTyID: return &ffi_type_double;
case Type::PointerTyID: return &ffi_type_pointer;
default: break;
}
// TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
report_fatal_error("Type could not be mapped for use with libffi.");
return NULL;
}
static void *ffiValueFor(Type *Ty, const GenericValue &AV,
void *ArgDataPtr) {
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
switch (cast<IntegerType>(Ty)->getBitWidth()) {
case 8: {
int8_t *I8Ptr = (int8_t *) ArgDataPtr;
*I8Ptr = (int8_t) AV.IntVal.getZExtValue();
return ArgDataPtr;
}
case 16: {
int16_t *I16Ptr = (int16_t *) ArgDataPtr;
*I16Ptr = (int16_t) AV.IntVal.getZExtValue();
return ArgDataPtr;
}
case 32: {
int32_t *I32Ptr = (int32_t *) ArgDataPtr;
*I32Ptr = (int32_t) AV.IntVal.getZExtValue();
return ArgDataPtr;
}
case 64: {
int64_t *I64Ptr = (int64_t *) ArgDataPtr;
*I64Ptr = (int64_t) AV.IntVal.getZExtValue();
return ArgDataPtr;
}
}
case Type::FloatTyID: {
float *FloatPtr = (float *) ArgDataPtr;
*FloatPtr = AV.FloatVal;
return ArgDataPtr;
}
case Type::DoubleTyID: {
double *DoublePtr = (double *) ArgDataPtr;
*DoublePtr = AV.DoubleVal;
return ArgDataPtr;
}
case Type::PointerTyID: {
void **PtrPtr = (void **) ArgDataPtr;
*PtrPtr = GVTOP(AV);
return ArgDataPtr;
}
default: break;
}
// TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
report_fatal_error("Type value could not be mapped for use with libffi.");
return NULL;
}
static bool ffiInvoke(RawFunc Fn, Function *F, ArrayRef<GenericValue> ArgVals,
const DataLayout *TD, GenericValue &Result) {
ffi_cif cif;
FunctionType *FTy = F->getFunctionType();
const unsigned NumArgs = F->arg_size();
// TODO: We don't have type information about the remaining arguments, because
// this information is never passed into ExecutionEngine::runFunction().
if (ArgVals.size() > NumArgs && F->isVarArg()) {
report_fatal_error("Calling external var arg function '" + F->getName()
+ "' is not supported by the Interpreter.");
}
unsigned ArgBytes = 0;
std::vector<ffi_type*> args(NumArgs);
for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
A != E; ++A) {
const unsigned ArgNo = A->getArgNo();
Type *ArgTy = FTy->getParamType(ArgNo);
args[ArgNo] = ffiTypeFor(ArgTy);
ArgBytes += TD->getTypeStoreSize(ArgTy);
}
SmallVector<uint8_t, 128> ArgData;
ArgData.resize(ArgBytes);
uint8_t *ArgDataPtr = ArgData.data();
SmallVector<void*, 16> values(NumArgs);
for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
A != E; ++A) {
const unsigned ArgNo = A->getArgNo();
Type *ArgTy = FTy->getParamType(ArgNo);
values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
ArgDataPtr += TD->getTypeStoreSize(ArgTy);
}
Type *RetTy = FTy->getReturnType();
ffi_type *rtype = ffiTypeFor(RetTy);
if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, &args[0]) == FFI_OK) {
SmallVector<uint8_t, 128> ret;
if (RetTy->getTypeID() != Type::VoidTyID)
ret.resize(TD->getTypeStoreSize(RetTy));
ffi_call(&cif, Fn, ret.data(), values.data());
switch (RetTy->getTypeID()) {
case Type::IntegerTyID:
switch (cast<IntegerType>(RetTy)->getBitWidth()) {
case 8: Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
}
break;
case Type::FloatTyID: Result.FloatVal = *(float *) ret.data(); break;
case Type::DoubleTyID: Result.DoubleVal = *(double*) ret.data(); break;
case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
default: break;
}
return true;
}
return false;
}
#endif // USE_LIBFFI
GenericValue Interpreter::callExternalFunction(Function *F,
ArrayRef<GenericValue> ArgVals) {
TheInterpreter = this;
unique_lock<sys::Mutex> Guard(*FunctionsLock);
// Do a lookup to see if the function is in our cache... this should just be a
// deferred annotation!
std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F);
if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F)
: FI->second) {
Guard.unlock();
return Fn(F->getFunctionType(), ArgVals);
}
#ifdef USE_LIBFFI
std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F);
RawFunc RawFn;
if (RF == RawFunctions->end()) {
RawFn = (RawFunc)(intptr_t)
sys::DynamicLibrary::SearchForAddressOfSymbol(F->getName());
if (!RawFn)
RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
if (RawFn != 0)
RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
} else {
RawFn = RF->second;
}
Guard.unlock();
GenericValue Result;
if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
return Result;
#endif // USE_LIBFFI
if (F->getName() == "__main")
errs() << "Tried to execute an unknown external function: "
<< *F->getType() << " __main\n";
else
report_fatal_error("Tried to execute an unknown external function: " +
F->getName());
#ifndef USE_LIBFFI
errs() << "Recompiling LLVM with --enable-libffi might help.\n";
#endif
return GenericValue();
}
//===----------------------------------------------------------------------===//
// Functions "exported" to the running application...
//
// void atexit(Function*)
static GenericValue lle_X_atexit(FunctionType *FT,
ArrayRef<GenericValue> Args) {
assert(Args.size() == 1);
TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
GenericValue GV;
GV.IntVal = 0;
return GV;
}
// void exit(int)
static GenericValue lle_X_exit(FunctionType *FT, ArrayRef<GenericValue> Args) {
TheInterpreter->exitCalled(Args[0]);
return GenericValue();
}
// void abort(void)
static GenericValue lle_X_abort(FunctionType *FT, ArrayRef<GenericValue> Args) {
//FIXME: should we report or raise here?
//report_fatal_error("Interpreted program raised SIGABRT");
raise (SIGABRT);
return GenericValue();
}
// int sprintf(char *, const char *, ...) - a very rough implementation to make
// output useful.
static GenericValue lle_X_sprintf(FunctionType *FT,
ArrayRef<GenericValue> Args) {
char *OutputBuffer = (char *)GVTOP(Args[0]);
const char *FmtStr = (const char *)GVTOP(Args[1]);
unsigned ArgNo = 2;
const size_t dummy_length_to_trick_oacr = 1000;
// printf should return # chars printed. This is completely incorrect, but
// close enough for now.
GenericValue GV;
GV.IntVal = APInt(32, strlen(FmtStr));
while (1) {
switch (*FmtStr) {
case 0: return GV; // Null terminator...
default: // Normal nonspecial character
//sprintf(OutputBuffer++, "%c", *FmtStr++);
sprintf_s(OutputBuffer++, dummy_length_to_trick_oacr, "%c", *FmtStr++);
break;
case '\\': { // Handle escape codes
//sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr + 1));
sprintf_s(OutputBuffer, dummy_length_to_trick_oacr, "%c%c", *FmtStr, *(FmtStr + 1));
FmtStr += 2; OutputBuffer += 2;
break;
}
case '%': { // Handle format specifiers
char FmtBuf[100] = "", Buffer[1000] = "";
char *FB = FmtBuf;
*FB++ = *FmtStr++;
char Last = *FB++ = *FmtStr++;
unsigned HowLong = 0;
while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
Last != 'p' && Last != 's' && Last != '%') {
if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's
Last = *FB++ = *FmtStr++;
}
*FB = 0;
switch (Last) {
case '%':
memcpy(Buffer, "%", 2); break;
case 'c':
//sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
sprintf_s(Buffer, _countof(Buffer), FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
break;
case 'd': case 'i':
case 'u': case 'o':
case 'x': case 'X':
if (HowLong >= 1) {
if (HowLong == 1 &&
TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 &&
sizeof(long) < sizeof(int64_t)) {
// Make sure we use %lld with a 64 bit argument because we might be
// compiling LLI on a 32 bit compiler.
unsigned Size = strlen(FmtBuf);
FmtBuf[Size] = FmtBuf[Size-1];
FmtBuf[Size+1] = 0;
FmtBuf[Size-1] = 'l';
}
//sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
sprintf_s(Buffer, _countof(Buffer), FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
} else
// sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
sprintf_s(Buffer, _countof(Buffer), FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
break;
case 'e': case 'E': case 'g': case 'G': case 'f':
//sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
sprintf_s(Buffer, _countof(Buffer), FmtBuf, Args[ArgNo++].DoubleVal); break;
case 'p':
//sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
sprintf_s(Buffer, _countof(Buffer), FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
case 's':
//sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
sprintf_s(Buffer, _countof(Buffer), FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
default:
errs() << "<unknown printf code '" << *FmtStr << "'!>";
ArgNo++; break;
}
size_t Len = strlen(Buffer);
memcpy(OutputBuffer, Buffer, Len + 1);
OutputBuffer += Len;
}
break;
}
}
return GV;
}
// int printf(const char *, ...) - a very rough implementation to make output
// useful.
static GenericValue lle_X_printf(FunctionType *FT,
ArrayRef<GenericValue> Args) {
char Buffer[10000];
_Analysis_assume_nullterminated_(Buffer);
std::vector<GenericValue> NewArgs;
NewArgs.push_back(PTOGV((void*)&Buffer[0]));
NewArgs.insert(NewArgs.end(), Args.begin(), Args.end());
GenericValue GV = lle_X_sprintf(FT, NewArgs);
outs() << Buffer;
return GV;
}
// int sscanf(const char *format, ...);
static GenericValue lle_X_sscanf(FunctionType *FT,
ArrayRef<GenericValue> args) {
assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
char *Args[10];
_Analysis_assume_nullterminated_(Args);
for (unsigned i = 0; i < args.size(); ++i) {
assert(i < 10);
Args[i] = (char*)GVTOP(args[i]);
}
GenericValue GV;
GV.IntVal = APInt(32, sscanf_s(Args[0], Args[1], Args[2], Args[3], Args[4],
Args[5], Args[6], Args[7], Args[8], Args[9]));
return GV;
}
// int scanf(const char *format, ...);
static GenericValue lle_X_scanf(FunctionType *FT, ArrayRef<GenericValue> args) {
assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
char *Args[10] = { 0 };
for (unsigned i = 0; i < args.size(); ++i) {
assert(i < 10);
Args[i] = (char*)GVTOP(args[i]);
}
GenericValue GV;
GV.IntVal = APInt(32, scanf_s( Args[0], Args[1], Args[2], Args[3], Args[4],
Args[5], Args[6], Args[7], Args[8], Args[9]));
return GV;
}
// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
// output useful.
static GenericValue lle_X_fprintf(FunctionType *FT,
ArrayRef<GenericValue> Args) {
assert(Args.size() >= 2);
char Buffer[10000];
_Analysis_assume_nullterminated_(Buffer);
std::vector<GenericValue> NewArgs;
NewArgs.push_back(PTOGV(Buffer));
NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
GenericValue GV = lle_X_sprintf(FT, NewArgs);
fputs(Buffer, (FILE *) GVTOP(Args[0]));
return GV;
}
static GenericValue lle_X_memset(FunctionType *FT,
ArrayRef<GenericValue> Args) {
int val = (int)Args[1].IntVal.getSExtValue();
size_t len = (size_t)Args[2].IntVal.getZExtValue();
memset((void *)GVTOP(Args[0]), val, len);
// llvm.memset.* returns void, lle_X_* returns GenericValue,
// so here we return GenericValue with IntVal set to zero
GenericValue GV;
GV.IntVal = 0;
return GV;
}
static GenericValue lle_X_memcpy(FunctionType *FT,
ArrayRef<GenericValue> Args) {
memcpy(GVTOP(Args[0]), GVTOP(Args[1]),
(size_t)(Args[2].IntVal.getLimitedValue()));
// llvm.memcpy* returns void, lle_X_* returns GenericValue,
// so here we return GenericValue with IntVal set to zero
GenericValue GV;
GV.IntVal = 0;
return GV;
}
void Interpreter::initializeExternalFunctions() {
sys::ScopedLock Writer(*FunctionsLock);
(*FuncNames)["lle_X_atexit"] = lle_X_atexit;
(*FuncNames)["lle_X_exit"] = lle_X_exit;
(*FuncNames)["lle_X_abort"] = lle_X_abort;
(*FuncNames)["lle_X_printf"] = lle_X_printf;
(*FuncNames)["lle_X_sprintf"] = lle_X_sprintf;
(*FuncNames)["lle_X_sscanf"] = lle_X_sscanf;
(*FuncNames)["lle_X_scanf"] = lle_X_scanf;
(*FuncNames)["lle_X_fprintf"] = lle_X_fprintf;
(*FuncNames)["lle_X_memset"] = lle_X_memset;
(*FuncNames)["lle_X_memcpy"] = lle_X_memcpy;
}
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Interpreter/Interpreter.h | //===-- Interpreter.h ------------------------------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This header file defines the interpreter structure
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
#define LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
class IntrinsicLowering;
struct FunctionInfo;
template<typename T> class generic_gep_type_iterator;
class ConstantExpr;
typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
// AllocaHolder - Object to track all of the blocks of memory allocated by
// alloca. When the function returns, this object is popped off the execution
// stack, which causes the dtor to be run, which frees all the alloca'd memory.
//
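// The holder is deliberately move-only: moving an ExecutionContext transfers
// ownership of the allocations, so each block is freed exactly once.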
class AllocaHolder {
std::vector<void *> Allocations;
public:
AllocaHolder() {}
// Make this type move-only. Define explicit move special members for MSVC.
AllocaHolder(AllocaHolder &&RHS) : Allocations(std::move(RHS.Allocations)) {}
AllocaHolder &operator=(AllocaHolder &&RHS) {
Allocations = std::move(RHS.Allocations);
return *this;
}
~AllocaHolder() {
for (void *Allocation : Allocations)
free(Allocation);
}
void add(void *Mem) { Allocations.push_back(Mem); }
};
typedef std::vector<GenericValue> ValuePlaneTy;
// ExecutionContext struct - This struct represents one stack frame currently
// executing.
//
struct ExecutionContext {
Function *CurFunction;// The currently executing function
BasicBlock *CurBB; // The currently executing BB
BasicBlock::iterator CurInst; // The next instruction to execute
CallSite Caller; // Holds the call that called subframes.
                        // Null if this is the top (main) frame or the
                        // function was invoked by a debugger.
std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
AllocaHolder Allocas; // Track memory allocated by alloca
ExecutionContext() : CurFunction(nullptr), CurBB(nullptr), CurInst(nullptr) {}
ExecutionContext(ExecutionContext &&O)
: CurFunction(O.CurFunction), CurBB(O.CurBB), CurInst(O.CurInst),
Caller(O.Caller), Values(std::move(O.Values)),
VarArgs(std::move(O.VarArgs)), Allocas(std::move(O.Allocas)) {}
ExecutionContext &operator=(ExecutionContext &&O) {
CurFunction = O.CurFunction;
CurBB = O.CurBB;
CurInst = O.CurInst;
Caller = O.Caller;
Values = std::move(O.Values);
VarArgs = std::move(O.VarArgs);
Allocas = std::move(O.Allocas);
return *this;
}
};
// Interpreter - This class represents the entirety of the interpreter.
//
class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
GenericValue ExitValue; // The return value of the called function
DataLayout TD;
IntrinsicLowering *IL;
// The runtime stack of executing code. The top of the stack is the current
// function record.
std::vector<ExecutionContext> ECStack;
// AtExitHandlers - List of functions to call when the program exits,
// registered with the atexit() library function.
std::vector<Function*> AtExitHandlers;
public:
explicit Interpreter(std::unique_ptr<Module> M);
~Interpreter() override;
/// runAtExitHandlers - Run any functions registered by the program's calls to
/// atexit(3), which we intercept and store in AtExitHandlers.
///
void runAtExitHandlers();
static void Register() {
InterpCtor = create;
}
/// Create an interpreter ExecutionEngine.
///
static ExecutionEngine *create(std::unique_ptr<Module> M,
std::string *ErrorStr = nullptr);
/// run - Start execution with the specified function and arguments.
///
GenericValue runFunction(Function *F,
ArrayRef<GenericValue> ArgValues) override;
void *getPointerToNamedFunction(StringRef Name,
bool AbortOnFailure = true) override {
// FIXME: not implemented.
return nullptr;
}
// Methods used to execute code:
// Place a call on the stack
void callFunction(Function *F, ArrayRef<GenericValue> ArgVals);
void run(); // Execute instructions until nothing left to do
// Opcode Implementations
void visitReturnInst(ReturnInst &I);
void visitBranchInst(BranchInst &I);
void visitSwitchInst(SwitchInst &I);
void visitIndirectBrInst(IndirectBrInst &I);
void visitBinaryOperator(BinaryOperator &I);
void visitICmpInst(ICmpInst &I);
void visitFCmpInst(FCmpInst &I);
void visitAllocaInst(AllocaInst &I);
void visitLoadInst(LoadInst &I);
void visitStoreInst(StoreInst &I);
void visitGetElementPtrInst(GetElementPtrInst &I);
void visitPHINode(PHINode &PN) {
llvm_unreachable("PHI nodes already handled!");
}
void visitTruncInst(TruncInst &I);
void visitZExtInst(ZExtInst &I);
void visitSExtInst(SExtInst &I);
void visitFPTruncInst(FPTruncInst &I);
void visitFPExtInst(FPExtInst &I);
void visitUIToFPInst(UIToFPInst &I);
void visitSIToFPInst(SIToFPInst &I);
void visitFPToUIInst(FPToUIInst &I);
void visitFPToSIInst(FPToSIInst &I);
void visitPtrToIntInst(PtrToIntInst &I);
void visitIntToPtrInst(IntToPtrInst &I);
void visitBitCastInst(BitCastInst &I);
void visitSelectInst(SelectInst &I);
void visitCallSite(CallSite CS);
void visitCallInst(CallInst &I) { visitCallSite (CallSite (&I)); }
void visitInvokeInst(InvokeInst &I) { visitCallSite (CallSite (&I)); }
void visitUnreachableInst(UnreachableInst &I);
void visitShl(BinaryOperator &I);
void visitLShr(BinaryOperator &I);
void visitAShr(BinaryOperator &I);
void visitVAArgInst(VAArgInst &I);
void visitExtractElementInst(ExtractElementInst &I);
void visitInsertElementInst(InsertElementInst &I);
void visitShuffleVectorInst(ShuffleVectorInst &I);
void visitExtractValueInst(ExtractValueInst &I);
void visitInsertValueInst(InsertValueInst &I);
void visitInstruction(Instruction &I) {
errs() << I << "\n";
llvm_unreachable("Instruction not interpretable yet!");
}
GenericValue callExternalFunction(Function *F,
ArrayRef<GenericValue> ArgVals);
void exitCalled(GenericValue GV);
void addAtExitHandler(Function *F) {
AtExitHandlers.push_back(F);
}
GenericValue *getFirstVarArg () {
return &(ECStack.back ().VarArgs[0]);
}
private: // Helper functions
GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
gep_type_iterator E, ExecutionContext &SF);
// SwitchToNewBasicBlock - Start execution in a new basic block and run any
// PHI nodes in the top of the block. This is used for intraprocedural
// control flow.
//
void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
void *getPointerToFunction(Function *F) override { return (void*)F; }
void initializeExecutionEngine() { }
void initializeExternalFunctions();
GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
GenericValue getOperandValue(Value *V, ExecutionContext &SF);
GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
Type *Ty, ExecutionContext &SF);
void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
};
} // End llvm namespace
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Interpreter/CMakeLists.txt | # Make sure that the path to libffi headers is on the command
# line. That path can be a compiler's non-default path even when
# FFI_INCLUDE_DIR was not used, because cmake has its own paths for
# searching for headers (CMAKE_SYSTEM_INCLUDE_PATH, for instance):
if( FFI_INCLUDE_PATH )
include_directories( ${FFI_INCLUDE_PATH} )
endif()
add_llvm_library(LLVMInterpreter
Execution.cpp
ExternalFunctions.cpp
Interpreter.cpp
)
if( LLVM_ENABLE_FFI )
target_link_libraries( LLVMInterpreter PRIVATE ${FFI_LIBRARY_PATH} )
endif()
add_dependencies(LLVMInterpreter intrinsics_gen)
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Interpreter/LLVMBuild.txt | ;===- ./lib/ExecutionEngine/Interpreter/LLVMBuild.txt ----------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = Interpreter
parent = ExecutionEngine
required_libraries = CodeGen Core ExecutionEngine Support
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Interpreter/Execution.cpp | //===-- Execution.cpp - Implement code to simulate the program ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//
#include "Interpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
#define DEBUG_TYPE "interpreter"
STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
cl::desc("make the interpreter print every volatile load and store"));
//===----------------------------------------------------------------------===//
// Various Helper Functions
//===----------------------------------------------------------------------===//
static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
SF.Values[V] = Val;
}
//===----------------------------------------------------------------------===//
// Binary Instruction Implementations
//===----------------------------------------------------------------------===//
#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
case Type::TY##TyID: \
Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
break
static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(+, Float);
IMPLEMENT_BINARY_OPERATOR(+, Double);
default:
dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
}
static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(-, Float);
IMPLEMENT_BINARY_OPERATOR(-, Double);
default:
dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
}
static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(*, Float);
IMPLEMENT_BINARY_OPERATOR(*, Double);
default:
dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
}
static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(/, Float);
IMPLEMENT_BINARY_OPERATOR(/, Double);
default:
dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
}
static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
case Type::FloatTyID:
Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
break;
case Type::DoubleTyID:
Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
break;
default:
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
}
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
case Type::IntegerTyID: \
Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
break;
#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
case Type::VectorTyID: { \
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
Dest.AggregateVal[_i].IntVal = APInt(1, \
Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));\
} break;
// Handle pointers specially because they must be compared with only as much
// width as the host has. We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
case Type::PointerTyID: \
Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
(void*)(intptr_t)Src2.PointerVal); \
break;
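// For example (hypothetical values), on a 32-bit host the intptr_t truncation
// makes 0x100000004 and 0x000000004 compare equal, which is exactly what a
// native pointer comparison on that host would observe.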
static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(eq,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
IMPLEMENT_POINTER_ICMP(==);
default:
dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ne,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
IMPLEMENT_POINTER_ICMP(!=);
default:
dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ult,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
IMPLEMENT_POINTER_ICMP(<);
default:
dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(slt,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
IMPLEMENT_POINTER_ICMP(<);
default:
dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ugt,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
IMPLEMENT_POINTER_ICMP(>);
default:
dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sgt,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
IMPLEMENT_POINTER_ICMP(>);
default:
dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(ule,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
IMPLEMENT_POINTER_ICMP(<=);
default:
dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sle,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
IMPLEMENT_POINTER_ICMP(<=);
default:
dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(uge,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
IMPLEMENT_POINTER_ICMP(>=);
default:
dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_INTEGER_ICMP(sge,Ty);
IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
IMPLEMENT_POINTER_ICMP(>=);
default:
dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
void Interpreter::visitICmpInst(ICmpInst &I) {
ExecutionContext &SF = ECStack.back();
Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
switch (I.getPredicate()) {
case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
default:
dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
llvm_unreachable(nullptr);
}
SetValue(&I, R, SF);
}
#define IMPLEMENT_FCMP(OP, TY) \
case Type::TY##TyID: \
Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
break
#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
Dest.AggregateVal[_i].IntVal = APInt(1, \
Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
break;
#define IMPLEMENT_VECTOR_FCMP(OP) \
case Type::VectorTyID: \
if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
} else { \
IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
}
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(==, Float);
IMPLEMENT_FCMP(==, Double);
IMPLEMENT_VECTOR_FCMP(==);
default:
dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
if (TY->isFloatTy()) { \
if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
Dest.IntVal = APInt(1,false); \
return Dest; \
} \
} else { \
if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
Dest.IntVal = APInt(1,false); \
return Dest; \
} \
}
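// The X != X test above is the standard NaN check: IEEE-754 comparisons
// involving a NaN are unordered, so NaN is the only value that compares
// unequal to itself.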
#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
Dest.AggregateVal.resize( X.AggregateVal.size() ); \
for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
else { \
Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
} \
}
#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
if (TY->isVectorTy()) { \
if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
} else { \
MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
} \
} \
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
Type *Ty)
{
GenericValue Dest;
  // If the input is scalar and Src1 or Src2 is NaN, return false.
IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // For vector input, detect NaNs and fill the mask.
MASK_VECTOR_NANS(Ty, Src1, Src2, false)
GenericValue DestMask = Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(!=, Float);
IMPLEMENT_FCMP(!=, Double);
IMPLEMENT_VECTOR_FCMP(!=);
default:
dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
  // In the vector case, mask out the NaN elements.
if (Ty->isVectorTy())
for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
if (DestMask.AggregateVal[_i].IntVal == false)
Dest.AggregateVal[_i].IntVal = APInt(1,false);
return Dest;
}
static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(<=, Float);
IMPLEMENT_FCMP(<=, Double);
IMPLEMENT_VECTOR_FCMP(<=);
default:
dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(>=, Float);
IMPLEMENT_FCMP(>=, Double);
IMPLEMENT_VECTOR_FCMP(>=);
default:
dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(<, Float);
IMPLEMENT_FCMP(<, Double);
IMPLEMENT_VECTOR_FCMP(<);
default:
dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
switch (Ty->getTypeID()) {
IMPLEMENT_FCMP(>, Float);
IMPLEMENT_FCMP(>, Double);
IMPLEMENT_VECTOR_FCMP(>);
default:
dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
return Dest;
}
#define IMPLEMENT_UNORDERED(TY, X,Y) \
if (TY->isFloatTy()) { \
if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
Dest.IntVal = APInt(1,true); \
return Dest; \
} \
} else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
Dest.IntVal = APInt(1,true); \
return Dest; \
}
#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
if (TY->isVectorTy()) { \
GenericValue DestMask = Dest; \
Dest = FUNC(Src1, Src2, Ty); \
for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
if (DestMask.AggregateVal[_i].IntVal == true) \
Dest.AggregateVal[_i].IntVal = APInt(1, true); \
return Dest; \
}
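// Each unordered predicate below (FCMP_U*) must return true whenever either
// operand is NaN: the scalar case short-circuits via IMPLEMENT_UNORDERED, the
// vector case pre-computes a NaN mask via MASK_VECTOR_NANS, and both then
// defer to the corresponding ordered comparison.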
static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
MASK_VECTOR_NANS(Ty, Src1, Src2, true)
IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
return executeFCMP_OEQ(Src1, Src2, Ty);
}
static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
MASK_VECTOR_NANS(Ty, Src1, Src2, true)
IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
return executeFCMP_ONE(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
MASK_VECTOR_NANS(Ty, Src1, Src2, true)
IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
return executeFCMP_OLE(Src1, Src2, Ty);
}
static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
MASK_VECTOR_NANS(Ty, Src1, Src2, true)
IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
return executeFCMP_OGE(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
MASK_VECTOR_NANS(Ty, Src1, Src2, true)
IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
return executeFCMP_OLT(Src1, Src2, Ty);
}
static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
IMPLEMENT_UNORDERED(Ty, Src1, Src2)
MASK_VECTOR_NANS(Ty, Src1, Src2, true)
IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
return executeFCMP_OGT(Src1, Src2, Ty);
}
static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
Dest.AggregateVal.resize( Src1.AggregateVal.size() );
if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
Dest.AggregateVal[_i].IntVal = APInt(1,
( (Src1.AggregateVal[_i].FloatVal ==
Src1.AggregateVal[_i].FloatVal) &&
(Src2.AggregateVal[_i].FloatVal ==
Src2.AggregateVal[_i].FloatVal)));
} else {
for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
Dest.AggregateVal[_i].IntVal = APInt(1,
( (Src1.AggregateVal[_i].DoubleVal ==
Src1.AggregateVal[_i].DoubleVal) &&
(Src2.AggregateVal[_i].DoubleVal ==
Src2.AggregateVal[_i].DoubleVal)));
}
} else if (Ty->isFloatTy())
Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
Src2.FloatVal == Src2.FloatVal));
else {
Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
Src2.DoubleVal == Src2.DoubleVal));
}
return Dest;
}
static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
Type *Ty) {
GenericValue Dest;
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
Dest.AggregateVal.resize( Src1.AggregateVal.size() );
if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
Dest.AggregateVal[_i].IntVal = APInt(1,
( (Src1.AggregateVal[_i].FloatVal !=
Src1.AggregateVal[_i].FloatVal) ||
(Src2.AggregateVal[_i].FloatVal !=
Src2.AggregateVal[_i].FloatVal)));
} else {
for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
Dest.AggregateVal[_i].IntVal = APInt(1,
( (Src1.AggregateVal[_i].DoubleVal !=
Src1.AggregateVal[_i].DoubleVal) ||
(Src2.AggregateVal[_i].DoubleVal !=
Src2.AggregateVal[_i].DoubleVal)));
}
} else if (Ty->isFloatTy())
Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
Src2.FloatVal != Src2.FloatVal));
else {
Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
Src2.DoubleVal != Src2.DoubleVal));
}
return Dest;
}
static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
const Type *Ty, const bool val) {
GenericValue Dest;
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
Dest.AggregateVal.resize( Src1.AggregateVal.size() );
for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
Dest.AggregateVal[_i].IntVal = APInt(1,val);
} else {
Dest.IntVal = APInt(1, val);
}
return Dest;
}
void Interpreter::visitFCmpInst(FCmpInst &I) {
ExecutionContext &SF = ECStack.back();
Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
switch (I.getPredicate()) {
default:
dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
llvm_unreachable(nullptr);
break;
case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
break;
case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
break;
case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
}
SetValue(&I, R, SF);
}
static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
GenericValue Src2, Type *Ty) {
GenericValue Result;
switch (predicate) {
case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
default:
dbgs() << "Unhandled Cmp predicate\n";
llvm_unreachable(nullptr);
}
}
void Interpreter::visitBinaryOperator(BinaryOperator &I) {
ExecutionContext &SF = ECStack.back();
Type *Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
// First process vector operation
if (Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
R.AggregateVal.resize(Src1.AggregateVal.size());
// Macros to execute binary operation 'OP' over integer vectors
#define INTEGER_VECTOR_OPERATION(OP) \
for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
R.AggregateVal[i].IntVal = \
Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
// Additional macros to execute binary operations udiv/sdiv/urem/srem since
// they have different notation.
#define INTEGER_VECTOR_FUNCTION(OP) \
for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
R.AggregateVal[i].IntVal = \
Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
// Macros to execute binary operation 'OP' over floating point type TY
// (float or double) vectors
#define FLOAT_VECTOR_FUNCTION(OP, TY) \
for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
R.AggregateVal[i].TY = \
Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
// Macros to choose appropriate TY: float or double and run operation
// execution
#define FLOAT_VECTOR_OP(OP) { \
if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
else { \
if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
else { \
dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
llvm_unreachable(0); \
} \
} \
}
switch(I.getOpcode()){
default:
dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
llvm_unreachable(nullptr);
break;
case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break;
case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break;
case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break;
case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break;
case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break;
case Instruction::And: INTEGER_VECTOR_OPERATION(&) break;
case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break;
case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break;
case Instruction::FAdd: FLOAT_VECTOR_OP(+) break;
case Instruction::FSub: FLOAT_VECTOR_OP(-) break;
case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
case Instruction::FRem:
if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
R.AggregateVal[i].FloatVal =
fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
else {
if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
R.AggregateVal[i].DoubleVal =
fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
else {
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
}
break;
}
} else {
switch (I.getOpcode()) {
default:
dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
llvm_unreachable(nullptr);
break;
case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
}
}
SetValue(&I, R, SF);
}
static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
GenericValue Src3, const Type *Ty) {
GenericValue Dest;
if(Ty->isVectorTy()) {
assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
Dest.AggregateVal.resize( Src1.AggregateVal.size() );
for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
Src3.AggregateVal[i] : Src2.AggregateVal[i];
} else {
Dest = (Src1.IntVal == 0) ? Src3 : Src2;
}
return Dest;
}
void Interpreter::visitSelectInst(SelectInst &I) {
ExecutionContext &SF = ECStack.back();
const Type * Ty = I.getOperand(0)->getType();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
SetValue(&I, R, SF);
}
//===----------------------------------------------------------------------===//
// Terminator Instruction Implementations
//===----------------------------------------------------------------------===//
void Interpreter::exitCalled(GenericValue GV) {
// runAtExitHandlers() assumes there are no stack frames, but
// if exit() was called, then it had a stack frame. Blow away
// the stack before interpreting atexit handlers.
ECStack.clear();
runAtExitHandlers();
exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
}
/// Pop the last stack frame off of ECStack and then copy the result
/// back into the result variable if we are not returning void. The
/// result variable may be the ExitValue, or the Value of the calling
/// CallInst if there was a previous stack frame. This method may
/// invalidate any ECStack iterators you have. This method also takes
/// care of switching to the normal destination BB, if we are returning
/// from an invoke.
///
void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
GenericValue Result) {
// Pop the current stack frame.
ECStack.pop_back();
if (ECStack.empty()) { // Finished main. Put result into exit code...
if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
ExitValue = Result; // Capture the exit value of the program
} else {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
}
} else {
// If we have a previous stack frame, and we have a previous call,
// fill in the return value...
ExecutionContext &CallingSF = ECStack.back();
if (Instruction *I = CallingSF.Caller.getInstruction()) {
// Save result...
if (!CallingSF.Caller.getType()->isVoidTy())
SetValue(I, Result, CallingSF);
if (InvokeInst *II = dyn_cast<InvokeInst> (I))
SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
CallingSF.Caller = CallSite(); // We returned from the call...
}
}
}
void Interpreter::visitReturnInst(ReturnInst &I) {
ExecutionContext &SF = ECStack.back();
Type *RetTy = Type::getVoidTy(I.getContext());
GenericValue Result;
// Save away the return value... (if we are not 'ret void')
if (I.getNumOperands()) {
RetTy = I.getReturnValue()->getType();
Result = getOperandValue(I.getReturnValue(), SF);
}
popStackAndReturnValueToCaller(RetTy, Result);
}
void Interpreter::visitUnreachableInst(UnreachableInst &I) {
report_fatal_error("Program executed an 'unreachable' instruction!");
}
void Interpreter::visitBranchInst(BranchInst &I) {
ExecutionContext &SF = ECStack.back();
BasicBlock *Dest;
Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
if (!I.isUnconditional()) {
Value *Cond = I.getCondition();
if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
Dest = I.getSuccessor(1);
}
SwitchToNewBasicBlock(Dest, SF);
}
void Interpreter::visitSwitchInst(SwitchInst &I) {
ExecutionContext &SF = ECStack.back();
Value* Cond = I.getCondition();
Type *ElTy = Cond->getType();
GenericValue CondVal = getOperandValue(Cond, SF);
// Check to see if any of the cases match...
BasicBlock *Dest = nullptr;
for (SwitchInst::CaseIt i = I.case_begin(), e = I.case_end(); i != e; ++i) {
GenericValue CaseVal = getOperandValue(i.getCaseValue(), SF);
if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
Dest = cast<BasicBlock>(i.getCaseSuccessor());
break;
}
}
if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
SwitchToNewBasicBlock(Dest, SF);
}
void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
ExecutionContext &SF = ECStack.back();
void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
}
// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
// This function handles the actual updating of block and instruction iterators
// as well as execution of all of the PHI nodes in the destination block.
//
// This method does this because all of the PHI nodes must be executed
// atomically, reading their inputs before any of the results are updated. Not
// doing this can cause problems if the PHI nodes depend on other PHI nodes for
// their inputs. If the input PHI node is updated before it is read, incorrect
// results can happen. Thus we use a two-phase approach.
//
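// For example, given the hypothetical block
//   %a = phi i32 [ %b, %pred ], ...
//   %b = phi i32 [ 7, %pred ], ...
// writing %a's result before reading %b's incoming value would make %a
// observe the updated %b rather than its value on entry to the block.
//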
void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
SF.CurBB = Dest; // Update CurBB to branch destination
SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
// Loop over all of the PHI nodes in the current block, reading their inputs.
std::vector<GenericValue> ResultValues;
for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
// Search for the value corresponding to this previous bb...
int i = PN->getBasicBlockIndex(PrevBB);
assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
Value *IncomingValue = PN->getIncomingValue(i);
// Save the incoming value for this PHI node...
ResultValues.push_back(getOperandValue(IncomingValue, SF));
}
// Now loop over all of the PHI nodes setting their values...
SF.CurInst = SF.CurBB->begin();
for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
PHINode *PN = cast<PHINode>(SF.CurInst);
SetValue(PN, ResultValues[i], SF);
}
}
//===----------------------------------------------------------------------===//
// Memory Instruction Implementations
//===----------------------------------------------------------------------===//
void Interpreter::visitAllocaInst(AllocaInst &I) {
ExecutionContext &SF = ECStack.back();
Type *Ty = I.getType()->getElementType(); // Type to be allocated
// Get the number of elements being allocated by the array...
unsigned NumElements =
getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
unsigned TypeSize = (size_t)TD.getTypeAllocSize(Ty);
// Avoid malloc-ing zero bytes, use max()...
unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
// Allocate enough memory to hold the type...
void *Memory = malloc(MemToAlloc);
DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize << " bytes) x "
<< NumElements << " (Total: " << MemToAlloc << ") at "
<< uintptr_t(Memory) << '\n');
GenericValue Result = PTOGV(Memory);
assert(Result.PointerVal && "Null pointer returned by malloc!");
SetValue(&I, Result, SF);
if (I.getOpcode() == Instruction::Alloca)
ECStack.back().Allocas.add(Memory);
}
// getElementOffset - The workhorse for getelementptr.
//
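// For a struct index the offset comes from the target's StructLayout; for a
// sequential (pointer/array/vector) index it is the element's alloc size
// times the index. E.g. for the hypothetical
//   getelementptr { i32, i64 }* %p, i64 2, i32 1
// Total = 2 * alloc-size({ i32, i64 }) + offset-of(element 1).
//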
GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
gep_type_iterator E,
ExecutionContext &SF) {
assert(Ptr->getType()->isPointerTy() &&
"Cannot getElementOffset of a nonpointer type!");
uint64_t Total = 0;
for (; I != E; ++I) {
if (StructType *STy = dyn_cast<StructType>(*I)) {
const StructLayout *SLO = TD.getStructLayout(STy);
const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
unsigned Index = unsigned(CPU->getZExtValue());
Total += SLO->getElementOffset(Index);
} else {
SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be a 32- or 64-bit
      // integer.
GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
int64_t Idx;
unsigned BitWidth =
cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
if (BitWidth == 32)
Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
else {
assert(BitWidth == 64 && "Invalid index type for getelementptr");
Idx = (int64_t)IdxGV.IntVal.getZExtValue();
}
Total += TD.getTypeAllocSize(ST->getElementType())*Idx;
}
}
GenericValue Result;
Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
return Result;
}
void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeGEPOperation(I.getPointerOperand(),
gep_type_begin(I), gep_type_end(I), SF), SF);
}
void Interpreter::visitLoadInst(LoadInst &I) {
ExecutionContext &SF = ECStack.back();
GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
GenericValue Result;
LoadValueFromMemory(Result, Ptr, I.getType());
SetValue(&I, Result, SF);
if (I.isVolatile() && PrintVolatile)
dbgs() << "Volatile load " << I;
}
void Interpreter::visitStoreInst(StoreInst &I) {
ExecutionContext &SF = ECStack.back();
GenericValue Val = getOperandValue(I.getOperand(0), SF);
GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
I.getOperand(0)->getType());
if (I.isVolatile() && PrintVolatile)
dbgs() << "Volatile store: " << I;
}
//===----------------------------------------------------------------------===//
// Miscellaneous Instruction Implementations
//===----------------------------------------------------------------------===//
void Interpreter::visitCallSite(CallSite CS) {
ExecutionContext &SF = ECStack.back();
// Check to see if this is an intrinsic function call...
Function *F = CS.getCalledFunction();
if (F && F->isDeclaration())
switch (F->getIntrinsicID()) {
case Intrinsic::not_intrinsic:
break;
case Intrinsic::vastart: { // va_start
GenericValue ArgIndex;
ArgIndex.UIntPairVal.first = ECStack.size() - 1;
ArgIndex.UIntPairVal.second = 0;
SetValue(CS.getInstruction(), ArgIndex, SF);
return;
}
case Intrinsic::vaend: // va_end is a noop for the interpreter
return;
case Intrinsic::vacopy: // va_copy: dest = src
SetValue(CS.getInstruction(), getOperandValue(*CS.arg_begin(), SF), SF);
return;
default:
// If it is an unknown intrinsic function, use the intrinsic lowering
// class to transform it into hopefully tasty LLVM code.
//
BasicBlock::iterator me(CS.getInstruction());
BasicBlock *Parent = CS.getInstruction()->getParent();
bool atBegin(Parent->begin() == me);
if (!atBegin)
--me;
IL->LowerIntrinsicCall(cast<CallInst>(CS.getInstruction()));
// Restore the CurInst pointer to the first instruction newly inserted, if
// any.
if (atBegin) {
SF.CurInst = Parent->begin();
} else {
SF.CurInst = me;
++SF.CurInst;
}
return;
}
SF.Caller = CS;
std::vector<GenericValue> ArgVals;
const unsigned NumArgs = SF.Caller.arg_size();
ArgVals.reserve(NumArgs);
uint16_t pNum = 1;
for (CallSite::arg_iterator i = SF.Caller.arg_begin(),
e = SF.Caller.arg_end(); i != e; ++i, ++pNum) {
Value *V = *i;
ArgVals.push_back(getOperandValue(V, SF));
}
// To handle indirect calls, we must get the pointer value from the argument
// and treat it as a function pointer.
GenericValue SRC = getOperandValue(SF.Caller.getCalledValue(), SF);
callFunction((Function*)GVTOP(SRC), ArgVals);
}
// auxiliary function for shift operations
static unsigned getShiftAmount(uint64_t orgShiftAmount,
llvm::APInt valueToShift) {
unsigned valueWidth = valueToShift.getBitWidth();
if (orgShiftAmount < (uint64_t)valueWidth)
return orgShiftAmount;
  // According to the LLVM documentation, if orgShiftAmount >= valueWidth the
  // result is undefined; we instead mask the shift amount, as most hardware
  // does:
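  // E.g. for a 32-bit value, NextPowerOf2(31) - 1 == 31, so an out-of-range
  // shift by 35 behaves like a shift by 35 & 31 == 3 (x86-like semantics).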
return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
}
void Interpreter::visitShl(BinaryOperator &I) {
ExecutionContext &SF = ECStack.back();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
const Type *Ty = I.getType();
if (Ty->isVectorTy()) {
uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
assert(src1Size == Src2.AggregateVal.size());
for (unsigned i = 0; i < src1Size; i++) {
GenericValue Result;
uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
Dest.AggregateVal.push_back(Result);
}
} else {
// scalar
uint64_t shiftAmount = Src2.IntVal.getZExtValue();
llvm::APInt valueToShift = Src1.IntVal;
Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
}
SetValue(&I, Dest, SF);
}
void Interpreter::visitLShr(BinaryOperator &I) {
ExecutionContext &SF = ECStack.back();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
const Type *Ty = I.getType();
if (Ty->isVectorTy()) {
uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
assert(src1Size == Src2.AggregateVal.size());
for (unsigned i = 0; i < src1Size; i++) {
GenericValue Result;
uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
Dest.AggregateVal.push_back(Result);
}
} else {
// scalar
uint64_t shiftAmount = Src2.IntVal.getZExtValue();
llvm::APInt valueToShift = Src1.IntVal;
Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
}
SetValue(&I, Dest, SF);
}
void Interpreter::visitAShr(BinaryOperator &I) {
ExecutionContext &SF = ECStack.back();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
const Type *Ty = I.getType();
if (Ty->isVectorTy()) {
size_t src1Size = Src1.AggregateVal.size();
assert(src1Size == Src2.AggregateVal.size());
for (unsigned i = 0; i < src1Size; i++) {
GenericValue Result;
uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
Dest.AggregateVal.push_back(Result);
}
} else {
// scalar
uint64_t shiftAmount = Src2.IntVal.getZExtValue();
llvm::APInt valueToShift = Src1.IntVal;
Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
}
SetValue(&I, Dest, SF);
}
GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
Type *SrcTy = SrcVal->getType();
if (SrcTy->isVectorTy()) {
Type *DstVecTy = DstTy->getScalarType();
unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned NumElts = Src.AggregateVal.size();
    // the sizes of src and dst vectors must be equal.
Dest.AggregateVal.resize(NumElts);
for (unsigned i = 0; i < NumElts; i++)
Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
} else {
IntegerType *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.trunc(DBitWidth);
}
return Dest;
}
GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
const Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->isVectorTy()) {
const Type *DstVecTy = DstTy->getScalarType();
unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
Dest.AggregateVal.resize(size);
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
} else {
const IntegerType *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.sext(DBitWidth);
}
return Dest;
}
GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
const Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->isVectorTy()) {
const Type *DstVecTy = DstTy->getScalarType();
unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
Dest.AggregateVal.resize(size);
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
} else {
const IntegerType *DITy = cast<IntegerType>(DstTy);
unsigned DBitWidth = DITy->getBitWidth();
Dest.IntVal = Src.IntVal.zext(DBitWidth);
}
return Dest;
}
GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
DstTy->getScalarType()->isFloatTy() &&
"Invalid FPTrunc instruction");
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
Dest.AggregateVal.resize(size);
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
} else {
assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
"Invalid FPTrunc instruction");
Dest.FloatVal = (float)Src.DoubleVal;
}
return Dest;
}
GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
Dest.AggregateVal.resize(size);
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
} else {
assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
"Invalid FPExt instruction");
Dest.DoubleVal = (double)Src.FloatVal;
}
return Dest;
}
GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->getTypeID() == Type::VectorTyID) {
const Type *DstVecTy = DstTy->getScalarType();
const Type *SrcVecTy = SrcTy->getScalarType();
uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal.
Dest.AggregateVal.resize(size);
if (SrcVecTy->getTypeID() == Type::FloatTyID) {
assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
Src.AggregateVal[i].FloatVal, DBitWidth);
} else {
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
Src.AggregateVal[i].DoubleVal, DBitWidth);
}
} else {
// scalar
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
if (SrcTy->getTypeID() == Type::FloatTyID)
Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
else {
Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
}
}
return Dest;
}
GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcTy->getTypeID() == Type::VectorTyID) {
const Type *DstVecTy = DstTy->getScalarType();
const Type *SrcVecTy = SrcTy->getScalarType();
uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal
Dest.AggregateVal.resize(size);
if (SrcVecTy->getTypeID() == Type::FloatTyID) {
assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
Src.AggregateVal[i].FloatVal, DBitWidth);
} else {
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
Src.AggregateVal[i].DoubleVal, DBitWidth);
}
} else {
// scalar
unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
if (SrcTy->getTypeID() == Type::FloatTyID)
Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
else {
Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
}
}
return Dest;
}
GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
const Type *DstVecTy = DstTy->getScalarType();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal
Dest.AggregateVal.resize(size);
if (DstVecTy->getTypeID() == Type::FloatTyID) {
assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].FloatVal =
APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
} else {
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].DoubleVal =
APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
}
} else {
// scalar
assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
if (DstTy->getTypeID() == Type::FloatTyID)
Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
else {
Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
}
}
return Dest;
}
GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if (SrcVal->getType()->getTypeID() == Type::VectorTyID) {
const Type *DstVecTy = DstTy->getScalarType();
unsigned size = Src.AggregateVal.size();
// the sizes of src and dst vectors must be equal
Dest.AggregateVal.resize(size);
if (DstVecTy->getTypeID() == Type::FloatTyID) {
assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].FloatVal =
APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
} else {
for (unsigned i = 0; i < size; i++)
Dest.AggregateVal[i].DoubleVal =
APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
}
} else {
// scalar
assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
if (DstTy->getTypeID() == Type::FloatTyID)
Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
else {
Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
}
}
return Dest;
}
GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
return Dest;
}
GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
  assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
uint32_t PtrSize = TD.getPointerSizeInBits();
if (PtrSize != Src.IntVal.getBitWidth())
Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
return Dest;
}
GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF) {
// This instruction supports bitwise conversion of vectors to integers and
// to vectors of other types (as long as they have the same size)
Type *SrcTy = SrcVal->getType();
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
if ((SrcTy->getTypeID() == Type::VectorTyID) ||
(DstTy->getTypeID() == Type::VectorTyID)) {
// vector src bitcast to vector dst or vector src bitcast to scalar dst or
// scalar src bitcast to vector dst
bool isLittleEndian = TD.isLittleEndian();
GenericValue TempDst, TempSrc, SrcVec;
const Type *SrcElemTy;
const Type *DstElemTy;
unsigned SrcBitSize;
unsigned DstBitSize;
unsigned SrcNum;
unsigned DstNum;
if (SrcTy->getTypeID() == Type::VectorTyID) {
SrcElemTy = SrcTy->getScalarType();
SrcBitSize = SrcTy->getScalarSizeInBits();
SrcNum = Src.AggregateVal.size();
SrcVec = Src;
} else {
// if src is scalar value, make it vector <1 x type>
SrcElemTy = SrcTy;
SrcBitSize = SrcTy->getPrimitiveSizeInBits();
SrcNum = 1;
SrcVec.AggregateVal.push_back(Src);
}
if (DstTy->getTypeID() == Type::VectorTyID) {
DstElemTy = DstTy->getScalarType();
DstBitSize = DstTy->getScalarSizeInBits();
DstNum = (SrcNum * SrcBitSize) / DstBitSize;
} else {
DstElemTy = DstTy;
DstBitSize = DstTy->getPrimitiveSizeInBits();
DstNum = 1;
}
if (SrcNum * SrcBitSize != DstNum * DstBitSize)
llvm_unreachable("Invalid BitCast");
// If src is floating point, cast to integer first.
TempSrc.AggregateVal.resize(SrcNum);
if (SrcElemTy->isFloatTy()) {
for (unsigned i = 0; i < SrcNum; i++)
TempSrc.AggregateVal[i].IntVal =
APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
} else if (SrcElemTy->isDoubleTy()) {
for (unsigned i = 0; i < SrcNum; i++)
TempSrc.AggregateVal[i].IntVal =
APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
} else if (SrcElemTy->isIntegerTy()) {
for (unsigned i = 0; i < SrcNum; i++)
TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
} else {
// Pointers are not allowed as the element type of vector.
llvm_unreachable("Invalid Bitcast");
}
// now TempSrc is integer type vector
if (DstNum < SrcNum) {
// Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
unsigned Ratio = SrcNum / DstNum;
unsigned SrcElt = 0;
for (unsigned i = 0; i < DstNum; i++) {
GenericValue Elt;
Elt.IntVal = 0;
Elt.IntVal = Elt.IntVal.zext(DstBitSize);
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
for (unsigned j = 0; j < Ratio; j++) {
APInt Tmp;
Tmp = Tmp.zext(SrcBitSize);
Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
Tmp = Tmp.zext(DstBitSize);
Tmp = Tmp.shl(ShiftAmt);
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
Elt.IntVal |= Tmp;
}
TempDst.AggregateVal.push_back(Elt);
}
} else {
// Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
unsigned Ratio = DstNum / SrcNum;
for (unsigned i = 0; i < SrcNum; i++) {
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
for (unsigned j = 0; j < Ratio; j++) {
GenericValue Elt;
Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
// it could be DstBitSize == SrcBitSize, so check it
if (DstBitSize < SrcBitSize)
Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
TempDst.AggregateVal.push_back(Elt);
}
}
}
// convert result from integer to specified type
if (DstTy->getTypeID() == Type::VectorTyID) {
if (DstElemTy->isDoubleTy()) {
Dest.AggregateVal.resize(DstNum);
for (unsigned i = 0; i < DstNum; i++)
Dest.AggregateVal[i].DoubleVal =
TempDst.AggregateVal[i].IntVal.bitsToDouble();
} else if (DstElemTy->isFloatTy()) {
Dest.AggregateVal.resize(DstNum);
for (unsigned i = 0; i < DstNum; i++)
Dest.AggregateVal[i].FloatVal =
TempDst.AggregateVal[i].IntVal.bitsToFloat();
} else {
Dest = TempDst;
}
} else {
if (DstElemTy->isDoubleTy())
Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
else if (DstElemTy->isFloatTy()) {
Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
} else {
Dest.IntVal = TempDst.AggregateVal[0].IntVal;
}
}
} else { // if ((SrcTy->getTypeID() == Type::VectorTyID) ||
// (DstTy->getTypeID() == Type::VectorTyID))
// scalar src bitcast to scalar dst
if (DstTy->isPointerTy()) {
assert(SrcTy->isPointerTy() && "Invalid BitCast");
Dest.PointerVal = Src.PointerVal;
} else if (DstTy->isIntegerTy()) {
if (SrcTy->isFloatTy())
Dest.IntVal = APInt::floatToBits(Src.FloatVal);
else if (SrcTy->isDoubleTy()) {
Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
} else if (SrcTy->isIntegerTy()) {
Dest.IntVal = Src.IntVal;
} else {
llvm_unreachable("Invalid BitCast");
}
} else if (DstTy->isFloatTy()) {
if (SrcTy->isIntegerTy())
Dest.FloatVal = Src.IntVal.bitsToFloat();
else {
Dest.FloatVal = Src.FloatVal;
}
} else if (DstTy->isDoubleTy()) {
if (SrcTy->isIntegerTy())
Dest.DoubleVal = Src.IntVal.bitsToDouble();
else {
Dest.DoubleVal = Src.DoubleVal;
}
} else {
llvm_unreachable("Invalid Bitcast");
}
}
return Dest;
}
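// Illustrative note on the scalar bitcast path above (a sketch, not part of
// the interpreter; assumes only the llvm::APInt API): a float-to-i32 bitcast
// reinterprets the bit pattern rather than converting the value, e.g.
//   llvm::APInt Bits = llvm::APInt::floatToBits(1.0f);
//   // Bits.getZExtValue() == 0x3F800000, the IEEE-754 encoding of 1.0f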
void Interpreter::visitTruncInst(TruncInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitSExtInst(SExtInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitZExtInst(ZExtInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitFPTruncInst(FPTruncInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitFPExtInst(FPExtInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitUIToFPInst(UIToFPInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitSIToFPInst(SIToFPInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitFPToUIInst(FPToUIInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitFPToSIInst(FPToSIInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
}
void Interpreter::visitBitCastInst(BitCastInst &I) {
ExecutionContext &SF = ECStack.back();
SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
}
#define IMPLEMENT_VAARG(TY) \
case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
void Interpreter::visitVAArgInst(VAArgInst &I) {
ExecutionContext &SF = ECStack.back();
// Get the incoming valist parameter. LLI treats the valist as a
  // (ec-stack-depth, var-arg-index) pair.
GenericValue VAList = getOperandValue(I.getOperand(0), SF);
GenericValue Dest;
GenericValue Src = ECStack[VAList.UIntPairVal.first]
.VarArgs[VAList.UIntPairVal.second];
Type *Ty = I.getType();
switch (Ty->getTypeID()) {
case Type::IntegerTyID:
Dest.IntVal = Src.IntVal;
break;
IMPLEMENT_VAARG(Pointer);
IMPLEMENT_VAARG(Float);
IMPLEMENT_VAARG(Double);
default:
dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
llvm_unreachable(nullptr);
}
// Set the Value of this Instruction.
SetValue(&I, Dest, SF);
// Move the pointer to the next vararg.
++VAList.UIntPairVal.second;
}
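// Sketch of the valist convention used above (illustrative, hypothetical
// values): the valist GenericValue's UIntPairVal encodes an
// (execution-stack frame index, vararg index) pair, so
//   VAList.UIntPairVal = {2, 0};  // third frame, first variadic argument
// selects ECStack[2].VarArgs[0] as the value produced by the va_arg.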
void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
ExecutionContext &SF = ECStack.back();
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest;
Type *Ty = I.getType();
const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
if(Src1.AggregateVal.size() > indx) {
switch (Ty->getTypeID()) {
default:
dbgs() << "Unhandled destination type for extractelement instruction: "
<< *Ty << "\n";
llvm_unreachable(nullptr);
break;
case Type::IntegerTyID:
Dest.IntVal = Src1.AggregateVal[indx].IntVal;
break;
case Type::FloatTyID:
Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
break;
case Type::DoubleTyID:
Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
break;
}
} else {
dbgs() << "Invalid index in extractelement instruction\n";
}
SetValue(&I, Dest, SF);
}
void Interpreter::visitInsertElementInst(InsertElementInst &I) {
ExecutionContext &SF = ECStack.back();
Type *Ty = I.getType();
if(!(Ty->isVectorTy()) )
llvm_unreachable("Unhandled dest type for insertelement instruction");
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
GenericValue Dest;
Type *TyContained = Ty->getContainedType(0);
const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
Dest.AggregateVal = Src1.AggregateVal;
if(Src1.AggregateVal.size() <= indx)
llvm_unreachable("Invalid index in insertelement instruction");
switch (TyContained->getTypeID()) {
default:
llvm_unreachable("Unhandled dest type for insertelement instruction");
case Type::IntegerTyID:
Dest.AggregateVal[indx].IntVal = Src2.IntVal;
break;
case Type::FloatTyID:
Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
break;
case Type::DoubleTyID:
Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
break;
}
SetValue(&I, Dest, SF);
}
void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
ExecutionContext &SF = ECStack.back();
Type *Ty = I.getType();
if(!(Ty->isVectorTy()))
llvm_unreachable("Unhandled dest type for shufflevector instruction");
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
GenericValue Dest;
// There is no need to check types of src1 and src2, because the compiled
// bytecode can't contain different types for src1 and src2 for a
// shufflevector instruction.
Type *TyContained = Ty->getContainedType(0);
unsigned src1Size = (unsigned)Src1.AggregateVal.size();
unsigned src2Size = (unsigned)Src2.AggregateVal.size();
unsigned src3Size = (unsigned)Src3.AggregateVal.size();
Dest.AggregateVal.resize(src3Size);
switch (TyContained->getTypeID()) {
default:
    llvm_unreachable("Unhandled dest type for shufflevector instruction");
break;
case Type::IntegerTyID:
for( unsigned i=0; i<src3Size; i++) {
unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
if(j < src1Size)
Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
else if(j < src1Size + src2Size)
Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
else
        // A selector may not exceed the sum of the lengths of the first and
        // second operands, and the LLVM assembler should reject IR like
        //   %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
        //                        <2 x i32> < i32 0, i32 5 >,
        // where i32 5 is out of range, but keep this as an additional check:
llvm_unreachable("Invalid mask in shufflevector instruction");
}
break;
case Type::FloatTyID:
for( unsigned i=0; i<src3Size; i++) {
unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
if(j < src1Size)
Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
else if(j < src1Size + src2Size)
Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
else
llvm_unreachable("Invalid mask in shufflevector instruction");
}
break;
case Type::DoubleTyID:
for( unsigned i=0; i<src3Size; i++) {
unsigned j = Src3.AggregateVal[i].IntVal.getZExtValue();
if(j < src1Size)
Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
else if(j < src1Size + src2Size)
Dest.AggregateVal[i].DoubleVal =
Src2.AggregateVal[j-src1Size].DoubleVal;
else
llvm_unreachable("Invalid mask in shufflevector instruction");
}
break;
}
SetValue(&I, Dest, SF);
}
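// Worked example for the mask handling above (illustrative IR): for
//   %r = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 0, i32 2>
// mask index 0 selects %a[0] (j < src1Size) and mask index 2 selects %b[0]
// (j - src1Size == 0), so %r holds <%a[0], %b[0]>.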
void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
ExecutionContext &SF = ECStack.back();
Value *Agg = I.getAggregateOperand();
GenericValue Dest;
GenericValue Src = getOperandValue(Agg, SF);
ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
unsigned Num = I.getNumIndices();
GenericValue *pSrc = &Src;
for (unsigned i = 0 ; i < Num; ++i) {
pSrc = &pSrc->AggregateVal[*IdxBegin];
++IdxBegin;
}
Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
switch (IndexedType->getTypeID()) {
default:
    llvm_unreachable("Unhandled dest type for extractvalue instruction");
break;
case Type::IntegerTyID:
Dest.IntVal = pSrc->IntVal;
break;
case Type::FloatTyID:
Dest.FloatVal = pSrc->FloatVal;
break;
case Type::DoubleTyID:
Dest.DoubleVal = pSrc->DoubleVal;
break;
case Type::ArrayTyID:
case Type::StructTyID:
case Type::VectorTyID:
Dest.AggregateVal = pSrc->AggregateVal;
break;
case Type::PointerTyID:
Dest.PointerVal = pSrc->PointerVal;
break;
}
SetValue(&I, Dest, SF);
}
void Interpreter::visitInsertValueInst(InsertValueInst &I) {
ExecutionContext &SF = ECStack.back();
Value *Agg = I.getAggregateOperand();
GenericValue Src1 = getOperandValue(Agg, SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue Dest = Src1; // Dest is a slightly changed Src1
ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
unsigned Num = I.getNumIndices();
GenericValue *pDest = &Dest;
for (unsigned i = 0 ; i < Num; ++i) {
pDest = &pDest->AggregateVal[*IdxBegin];
++IdxBegin;
}
// pDest points to the target value in the Dest now
Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
switch (IndexedType->getTypeID()) {
default:
    llvm_unreachable("Unhandled dest type for insertvalue instruction");
break;
case Type::IntegerTyID:
pDest->IntVal = Src2.IntVal;
break;
case Type::FloatTyID:
pDest->FloatVal = Src2.FloatVal;
break;
case Type::DoubleTyID:
pDest->DoubleVal = Src2.DoubleVal;
break;
case Type::ArrayTyID:
case Type::StructTyID:
case Type::VectorTyID:
pDest->AggregateVal = Src2.AggregateVal;
break;
case Type::PointerTyID:
pDest->PointerVal = Src2.PointerVal;
break;
}
SetValue(&I, Dest, SF);
}
GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
ExecutionContext &SF) {
switch (CE->getOpcode()) {
case Instruction::Trunc:
return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::ZExt:
return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::SExt:
return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::FPTrunc:
return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::FPExt:
return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::UIToFP:
return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::SIToFP:
return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::FPToUI:
return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::FPToSI:
return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::PtrToInt:
return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::IntToPtr:
return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::BitCast:
return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
case Instruction::GetElementPtr:
return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
gep_type_end(CE), SF);
case Instruction::FCmp:
case Instruction::ICmp:
return executeCmpInst(CE->getPredicate(),
getOperandValue(CE->getOperand(0), SF),
getOperandValue(CE->getOperand(1), SF),
CE->getOperand(0)->getType());
case Instruction::Select:
return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
getOperandValue(CE->getOperand(1), SF),
getOperandValue(CE->getOperand(2), SF),
CE->getOperand(0)->getType());
default :
break;
}
// The cases below here require a GenericValue parameter for the result
// so we initialize one, compute it and then return it.
GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
GenericValue Dest;
Type * Ty = CE->getOperand(0)->getType();
switch (CE->getOpcode()) {
case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
case Instruction::Shl:
Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
break;
case Instruction::LShr:
Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
break;
case Instruction::AShr:
Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
break;
default:
dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
llvm_unreachable("Unhandled ConstantExpr");
}
return Dest;
}
GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
return getConstantExprValue(CE, SF);
} else if (Constant *CPV = dyn_cast<Constant>(V)) {
return getConstantValue(CPV);
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
return PTOGV(getPointerToGlobal(GV));
} else {
return SF.Values[V];
}
}
//===----------------------------------------------------------------------===//
// Dispatch and Execution Code
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// callFunction - Execute the specified function...
//
void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
assert((ECStack.empty() || !ECStack.back().Caller.getInstruction() ||
ECStack.back().Caller.arg_size() == ArgVals.size()) &&
"Incorrect number of arguments passed into function call!");
// Make a new stack frame... and fill it in.
ECStack.emplace_back();
ExecutionContext &StackFrame = ECStack.back();
StackFrame.CurFunction = F;
// Special handling for external functions.
if (F->isDeclaration()) {
GenericValue Result = callExternalFunction (F, ArgVals);
// Simulate a 'ret' instruction of the appropriate type.
popStackAndReturnValueToCaller (F->getReturnType (), Result);
return;
}
// Get pointers to first LLVM BB & Instruction in function.
StackFrame.CurBB = F->begin();
StackFrame.CurInst = StackFrame.CurBB->begin();
// Run through the function arguments and initialize their values...
assert((ArgVals.size() == F->arg_size() ||
(ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
"Invalid number of values passed to function invocation!");
// Handle non-varargs arguments...
unsigned i = 0;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI, ++i)
SetValue(AI, ArgVals[i], StackFrame);
// Handle varargs arguments...
StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
}
void Interpreter::run() {
while (!ECStack.empty()) {
// Interpret a single instruction & increment the "PC".
ExecutionContext &SF = ECStack.back(); // Current stack frame
Instruction &I = *SF.CurInst++; // Increment before execute
// Track the number of dynamic instructions executed.
++NumDynamicInsts;
DEBUG(dbgs() << "About to interpret: " << I);
visit(I); // Dispatch to one of the visit* methods...
#if 0
// This is not safe, as visiting the instruction could lower it and free I.
DEBUG(
if (!isa<CallInst>(I) && !isa<InvokeInst>(I) &&
I.getType() != Type::VoidTy) {
dbgs() << " --> ";
const GenericValue &Val = SF.Values[&I];
switch (I.getType()->getTypeID()) {
default: llvm_unreachable("Invalid GenericValue Type");
case Type::VoidTyID: dbgs() << "void"; break;
case Type::FloatTyID: dbgs() << "float " << Val.FloatVal; break;
case Type::DoubleTyID: dbgs() << "double " << Val.DoubleVal; break;
case Type::PointerTyID: dbgs() << "void* " << intptr_t(Val.PointerVal);
break;
case Type::IntegerTyID:
dbgs() << "i" << Val.IntVal.getBitWidth() << " "
<< Val.IntVal.toStringUnsigned(10)
<< " (0x" << Val.IntVal.toStringUnsigned(16) << ")\n";
break;
}
});
#endif
}
}
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/Interpreter/Interpreter.cpp | //===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the top-level functionality for the LLVM interpreter.
// This interpreter is designed to be a very simple, portable, inefficient
// interpreter.
//
//===----------------------------------------------------------------------===//
#include "Interpreter.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include <cstring>
using namespace llvm;
namespace {
static struct RegisterInterp {
RegisterInterp() { Interpreter::Register(); }
} InterpRegistrator;
}
extern "C" void LLVMLinkInInterpreter() { }
/// Create a new interpreter object.
///
ExecutionEngine *Interpreter::create(std::unique_ptr<Module> M,
std::string *ErrStr) {
// Tell this Module to materialize everything and release the GVMaterializer.
if (std::error_code EC = M->materializeAllPermanently()) {
if (ErrStr)
*ErrStr = EC.message();
    // We got an error, just return null.
return nullptr;
}
return new Interpreter(std::move(M));
}
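// Minimal usage sketch for the factory above (illustrative only; 'M' is a
// hypothetical caller-supplied std::unique_ptr<Module>):
//   std::string ErrStr;
//   ExecutionEngine *EE = Interpreter::create(std::move(M), &ErrStr);
//   if (!EE)
//     report_fatal_error(ErrStr);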
//===----------------------------------------------------------------------===//
// Interpreter ctor - Initialize stuff
//
Interpreter::Interpreter(std::unique_ptr<Module> M)
: ExecutionEngine(std::move(M)), TD(Modules.back().get()) {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
setDataLayout(&TD);
// Initialize the "backend"
initializeExecutionEngine();
initializeExternalFunctions();
emitGlobals();
IL = new IntrinsicLowering(TD);
}
Interpreter::~Interpreter() {
delete IL;
}
void Interpreter::runAtExitHandlers () {
while (!AtExitHandlers.empty()) {
callFunction(AtExitHandlers.back(), None);
AtExitHandlers.pop_back();
run();
}
}
/// run - Start execution with the specified function and arguments.
///
GenericValue Interpreter::runFunction(Function *F,
ArrayRef<GenericValue> ArgValues) {
  assert(F && "Function *F was null at entry to runFunction()");
// Try extra hard not to pass extra args to a function that isn't
// expecting them. C programmers frequently bend the rules and
// declare main() with fewer parameters than it actually gets
// passed, and the interpreter barfs if you pass a function more
// parameters than it is declared to take. This does not attempt to
// take into account gratuitous differences in declared types,
// though.
const size_t ArgCount = F->getFunctionType()->getNumParams();
ArrayRef<GenericValue> ActualArgs =
ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
// Set up the function call.
callFunction(F, ActualArgs);
// Start executing the function.
run();
return ExitValue;
}
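// Illustrative call sequence for runFunction (hypothetical 'F'; the
// GenericValue arguments must match the function's parameter types):
//   std::vector<GenericValue> Args(1);
//   Args[0].IntVal = APInt(32, 42);
//   GenericValue R = EE->runFunction(F, Args);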
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h | //===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Interface for the implementations of runtime dynamic linker facilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/SwapByteOrder.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <system_error>
using namespace llvm;
using namespace llvm::object;
namespace llvm {
// Helper for extensive error checking in debug builds.
inline std::error_code Check(std::error_code Err) {
if (Err) {
report_fatal_error(Err.message());
}
return Err;
}
class Twine;
/// SectionEntry - represents a section emitted into memory by the dynamic
/// linker.
class SectionEntry {
public:
/// Name - section name.
std::string Name;
/// Address - address in the linker's memory where the section resides.
uint8_t *Address;
/// Size - section size. Doesn't include the stubs.
size_t Size;
/// LoadAddress - the address of the section in the target process's memory.
/// Used for situations in which JIT-ed code is being executed in the address
/// space of a separate process. If the code executes in the same address
/// space where it was JIT-ed, this just equals Address.
uint64_t LoadAddress;
/// StubOffset - used for architectures with stub functions for far
/// relocations (like ARM).
uintptr_t StubOffset;
/// ObjAddress - address of the section in the in-memory object file. Used
/// for calculating relocations in some object formats (like MachO).
uintptr_t ObjAddress;
SectionEntry(StringRef name, uint8_t *address, size_t size,
uintptr_t objAddress)
: Name(name), Address(address), Size(size),
LoadAddress(reinterpret_cast<uintptr_t>(address)), StubOffset(size),
ObjAddress(objAddress) {}
};
/// RelocationEntry - used to represent relocations internally in the dynamic
/// linker.
class RelocationEntry {
public:
/// SectionID - the section this relocation points to.
unsigned SectionID;
/// Offset - offset into the section.
uint64_t Offset;
/// RelType - relocation type.
uint32_t RelType;
/// Addend - the relocation addend encoded in the instruction itself. Also
/// used to make a relocation section relative instead of symbol relative.
int64_t Addend;
struct SectionPair {
uint32_t SectionA;
uint32_t SectionB;
};
/// SymOffset - Section offset of the relocation entry's symbol (used for GOT
/// lookup).
union {
uint64_t SymOffset;
SectionPair Sections;
};
/// True if this is a PCRel relocation (MachO specific).
bool IsPCRel;
/// The size of this relocation (MachO specific).
unsigned Size;
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
: SectionID(id), Offset(offset), RelType(type), Addend(addend),
SymOffset(0), IsPCRel(false), Size(0) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
uint64_t symoffset)
: SectionID(id), Offset(offset), RelType(type), Addend(addend),
SymOffset(symoffset), IsPCRel(false), Size(0) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
bool IsPCRel, unsigned Size)
: SectionID(id), Offset(offset), RelType(type), Addend(addend),
SymOffset(0), IsPCRel(IsPCRel), Size(Size) {}
RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
: SectionID(id), Offset(offset), RelType(type),
Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
Size(Size) {
Sections.SectionA = SectionA;
Sections.SectionB = SectionB;
}
};
class RelocationValueRef {
public:
unsigned SectionID;
uint64_t Offset;
int64_t Addend;
const char *SymbolName;
RelocationValueRef() : SectionID(0), Offset(0), Addend(0),
SymbolName(nullptr) {}
inline bool operator==(const RelocationValueRef &Other) const {
return SectionID == Other.SectionID && Offset == Other.Offset &&
Addend == Other.Addend && SymbolName == Other.SymbolName;
}
inline bool operator<(const RelocationValueRef &Other) const {
if (SectionID != Other.SectionID)
return SectionID < Other.SectionID;
if (Offset != Other.Offset)
return Offset < Other.Offset;
if (Addend != Other.Addend)
return Addend < Other.Addend;
return SymbolName < Other.SymbolName;
}
};
/// @brief Symbol info for RuntimeDyld.
class SymbolTableEntry : public JITSymbolBase {
public:
SymbolTableEntry()
: JITSymbolBase(JITSymbolFlags::None), Offset(0), SectionID(0) {}
SymbolTableEntry(unsigned SectionID, uint64_t Offset, JITSymbolFlags Flags)
: JITSymbolBase(Flags), Offset(Offset), SectionID(SectionID) {}
unsigned getSectionID() const { return SectionID; }
uint64_t getOffset() const { return Offset; }
private:
uint64_t Offset;
unsigned SectionID;
};
typedef StringMap<SymbolTableEntry> RTDyldSymbolTable;
class RuntimeDyldImpl {
friend class RuntimeDyld::LoadedObjectInfo;
friend class RuntimeDyldCheckerImpl;
protected:
// The MemoryManager to load objects into.
RuntimeDyld::MemoryManager &MemMgr;
// The symbol resolver to use for external symbols.
RuntimeDyld::SymbolResolver &Resolver;
// Attached RuntimeDyldChecker instance. Null if no instance attached.
RuntimeDyldCheckerImpl *Checker;
// A list of all sections emitted by the dynamic linker. These sections are
// referenced in the code by means of their index in this list - SectionID.
typedef SmallVector<SectionEntry, 64> SectionList;
SectionList Sections;
typedef unsigned SID; // Type for SectionIDs
#define RTDYLD_INVALID_SECTION_ID ((RuntimeDyldImpl::SID)(-1))
// Keep a map of sections from object file to the SectionID which
// references it.
typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
// A global symbol table for symbols from all loaded modules.
RTDyldSymbolTable GlobalSymbolTable;
// Keep a map of common symbols to their info pairs
typedef std::vector<SymbolRef> CommonSymbolList;
// For each symbol, keep a list of relocations based on it. Anytime
// its address is reassigned (the JIT re-compiled the function, e.g.),
// the relocations get re-resolved.
// The symbol (or section) the relocation is sourced from is the Key
// in the relocation list where it's stored.
typedef SmallVector<RelocationEntry, 64> RelocationList;
// Relocations to sections already loaded. Indexed by SectionID which is the
// source of the address. The target where the address will be written is
// SectionID/Offset in the relocation itself.
DenseMap<unsigned, RelocationList> Relocations;
// Relocations to external symbols that are not yet resolved. Symbols are
// external when they aren't found in the global symbol table of all loaded
// modules. This map is indexed by symbol name.
StringMap<RelocationList> ExternalSymbolRelocations;
typedef std::map<RelocationValueRef, uintptr_t> StubMap;
Triple::ArchType Arch;
bool IsTargetLittleEndian;
bool IsMipsO32ABI;
bool IsMipsN64ABI;
// True if all sections should be passed to the memory manager, false if only
// sections containing relocations should be. Defaults to 'false'.
bool ProcessAllSections;
// This mutex prevents simultaneously loading objects from two different
// threads. This keeps us from having to protect individual data structures
// and guarantees that section allocation requests to the memory manager
// won't be interleaved between modules. It is also used in mapSectionAddress
// and resolveRelocations to protect write access to internal data structures.
//
  // loadObject may be called on the same thread during the handling of
// processRelocations, and that's OK. The handling of the relocation lists
// is written in such a way as to work correctly if new elements are added to
// the end of the list while the list is being processed.
sys::Mutex lock;
virtual unsigned getMaxStubSize() = 0;
virtual unsigned getStubAlignment() = 0;
bool HasError;
std::string ErrorStr;
// Set the error state and record an error string.
bool Error(const Twine &Msg) {
ErrorStr = Msg.str();
HasError = true;
return true;
}
uint64_t getSectionLoadAddress(unsigned SectionID) const {
return Sections[SectionID].LoadAddress;
}
uint8_t *getSectionAddress(unsigned SectionID) const {
return (uint8_t *)Sections[SectionID].Address;
}
void writeInt16BE(uint8_t *Addr, uint16_t Value) {
if (IsTargetLittleEndian)
sys::swapByteOrder(Value);
*Addr = (Value >> 8) & 0xFF;
*(Addr + 1) = Value & 0xFF;
}
void writeInt32BE(uint8_t *Addr, uint32_t Value) {
if (IsTargetLittleEndian)
sys::swapByteOrder(Value);
*Addr = (Value >> 24) & 0xFF;
*(Addr + 1) = (Value >> 16) & 0xFF;
*(Addr + 2) = (Value >> 8) & 0xFF;
*(Addr + 3) = Value & 0xFF;
}
void writeInt64BE(uint8_t *Addr, uint64_t Value) {
if (IsTargetLittleEndian)
sys::swapByteOrder(Value);
*Addr = (Value >> 56) & 0xFF;
*(Addr + 1) = (Value >> 48) & 0xFF;
*(Addr + 2) = (Value >> 40) & 0xFF;
*(Addr + 3) = (Value >> 32) & 0xFF;
*(Addr + 4) = (Value >> 24) & 0xFF;
*(Addr + 5) = (Value >> 16) & 0xFF;
*(Addr + 6) = (Value >> 8) & 0xFF;
*(Addr + 7) = Value & 0xFF;
}
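  // Illustrative behavior of the write helpers above (hypothetical buffer):
  // writeInt32BE(Buf, 0x11223344) stores the bytes 11 22 33 44 when the
  // target is big-endian; for a little-endian target the value is
  // byte-swapped first, so the stored order is 44 33 22 11.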
virtual void setMipsABI(const ObjectFile &Obj) {
IsMipsO32ABI = false;
IsMipsN64ABI = false;
}
  /// Endian-aware read. Read the least significant Size bytes from Src.
uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const;
/// Endian-aware write. Write the least significant Size bytes from Value to
/// Dst.
void writeBytesUnaligned(uint64_t Value, uint8_t *Dst, unsigned Size) const;
/// \brief Given the common symbols discovered in the object file, emit a
/// new section for them and update the symbol mappings in the object and
/// symbol table.
void emitCommonSymbols(const ObjectFile &Obj, CommonSymbolList &CommonSymbols);
/// \brief Emits section data from the object file to the MemoryManager.
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emits, else allocateDataSection() will be used.
/// \return SectionID.
unsigned emitSection(const ObjectFile &Obj, const SectionRef &Section,
bool IsCode);
  /// \brief Find Section in LocalSections. If the section is not found, emit
  /// it and store it in LocalSections.
  /// \param IsCode if it's true then allocateCodeSection() will be
  ///        used for emits, else allocateDataSection() will be used.
/// \return SectionID.
unsigned findOrEmitSection(const ObjectFile &Obj, const SectionRef &Section,
bool IsCode, ObjSectionToIDMap &LocalSections);
// \brief Add a relocation entry that uses the given section.
void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
// \brief Add a relocation entry that uses the given symbol. This symbol may
// be found in the global symbol table, or it may be external.
void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
/// \brief Emits long jump instruction to Addr.
/// \return Pointer to the memory area for emitting target address.
uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
/// \brief Resolves relocations from Relocs list with address from Value.
void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
/// \brief A object file specific relocation resolver
/// \param RE The relocation to be resolved
/// \param Value Target symbol address to apply the relocation action
virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
/// \brief Parses one or more object file relocations (some object files use
/// relocation pairs) and stores it to Relocations or SymbolRelocations
/// (this depends on the object file type).
/// \return Iterator to the next relocation that needs to be parsed.
virtual relocation_iterator
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) = 0;
/// \brief Resolve relocations to external symbols.
void resolveExternalSymbols();
// \brief Compute an upper bound of the memory that is required to load all
// sections
void computeTotalAllocSize(const ObjectFile &Obj, uint64_t &CodeSize,
uint64_t &DataSizeRO, uint64_t &DataSizeRW);
// \brief Compute the stub buffer size required for a section
unsigned computeSectionStubBufSize(const ObjectFile &Obj,
const SectionRef &Section);
// \brief Implementation of the generic part of the loadObject algorithm.
std::pair<unsigned, unsigned> loadObjectImpl(const object::ObjectFile &Obj);
public:
RuntimeDyldImpl(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: MemMgr(MemMgr), Resolver(Resolver), Checker(nullptr),
ProcessAllSections(false), HasError(false) {
}
virtual ~RuntimeDyldImpl();
void setProcessAllSections(bool ProcessAllSections) {
this->ProcessAllSections = ProcessAllSections;
}
void setRuntimeDyldChecker(RuntimeDyldCheckerImpl *Checker) {
this->Checker = Checker;
}
virtual std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
loadObject(const object::ObjectFile &Obj) = 0;
uint8_t* getSymbolLocalAddress(StringRef Name) const {
// FIXME: Just look up as a function for now. Overly simple of course.
// Work in progress.
RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
if (pos == GlobalSymbolTable.end())
return nullptr;
const auto &SymInfo = pos->second;
return getSectionAddress(SymInfo.getSectionID()) + SymInfo.getOffset();
}
RuntimeDyld::SymbolInfo getSymbol(StringRef Name) const {
// FIXME: Just look up as a function for now. Overly simple of course.
// Work in progress.
RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
if (pos == GlobalSymbolTable.end())
return nullptr;
const auto &SymEntry = pos->second;
uint64_t TargetAddr =
getSectionLoadAddress(SymEntry.getSectionID()) + SymEntry.getOffset();
return RuntimeDyld::SymbolInfo(TargetAddr, SymEntry.getFlags());
}
void resolveRelocations();
void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
// Is the linker in an error state?
bool hasError() { return HasError; }
// Mark the error condition as handled and continue.
void clearError() { HasError = false; }
// Get the error message.
StringRef getErrorString() { return ErrorStr; }
virtual bool isCompatibleFile(const ObjectFile &Obj) const = 0;
virtual void registerEHFrames();
virtual void deregisterEHFrames();
virtual void finalizeLoad(const ObjectFile &ObjImg,
ObjSectionToIDMap &SectionMap) {}
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h | //===-- RuntimeDyldCheckerImpl.h -- RuntimeDyld test framework --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
#include "RuntimeDyldImpl.h"
#include <set>
namespace llvm {
class RuntimeDyldCheckerImpl {
friend class RuntimeDyldChecker;
friend class RuntimeDyldImpl;
friend class RuntimeDyldCheckerExprEval;
friend class RuntimeDyldELF;
public:
RuntimeDyldCheckerImpl(RuntimeDyld &RTDyld, MCDisassembler *Disassembler,
MCInstPrinter *InstPrinter,
llvm::raw_ostream &ErrStream);
bool check(StringRef CheckExpr) const;
bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
private:
// StubMap typedefs.
typedef std::map<std::string, uint64_t> StubOffsetsMap;
struct SectionAddressInfo {
uint64_t SectionID;
StubOffsetsMap StubOffsets;
};
typedef std::map<std::string, SectionAddressInfo> SectionMap;
typedef std::map<std::string, SectionMap> StubMap;
RuntimeDyldImpl &getRTDyld() const { return *RTDyld.Dyld; }
bool isSymbolValid(StringRef Symbol) const;
uint64_t getSymbolLocalAddr(StringRef Symbol) const;
uint64_t getSymbolRemoteAddr(StringRef Symbol) const;
uint64_t readMemoryAtAddr(uint64_t Addr, unsigned Size) const;
std::pair<const SectionAddressInfo*, std::string> findSectionAddrInfo(
StringRef FileName,
StringRef SectionName) const;
std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
StringRef SectionName,
bool IsInsideLoad) const;
std::pair<uint64_t, std::string> getStubAddrFor(StringRef FileName,
StringRef SectionName,
StringRef Symbol,
bool IsInsideLoad) const;
StringRef getSubsectionStartingAt(StringRef Name) const;
void registerSection(StringRef FilePath, unsigned SectionID);
void registerStubMap(StringRef FilePath, unsigned SectionID,
const RuntimeDyldImpl::StubMap &RTDyldStubs);
RuntimeDyld &RTDyld;
MCDisassembler *Disassembler;
MCInstPrinter *InstPrinter;
llvm::raw_ostream &ErrStream;
StubMap Stubs;
};
}
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp | //===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
#include "RuntimeDyldCheckerImpl.h"
#include "RuntimeDyldImpl.h"
#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Path.h"
#include <cctype>
#include <memory>
#define DEBUG_TYPE "rtdyld"
using namespace llvm;
namespace llvm {
// Helper class that implements the language evaluated by RuntimeDyldChecker.
class RuntimeDyldCheckerExprEval {
public:
RuntimeDyldCheckerExprEval(const RuntimeDyldCheckerImpl &Checker,
raw_ostream &ErrStream)
: Checker(Checker) {}
bool evaluate(StringRef Expr) const {
// Expect equality expression of the form 'LHS = RHS'.
Expr = Expr.trim();
size_t EQIdx = Expr.find('=');
ParseContext OutsideLoad(false);
// Evaluate LHS.
StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
StringRef RemainingExpr;
EvalResult LHSResult;
std::tie(LHSResult, RemainingExpr) =
evalComplexExpr(evalSimpleExpr(LHSExpr, OutsideLoad), OutsideLoad);
if (LHSResult.hasError())
return handleError(Expr, LHSResult);
if (RemainingExpr != "")
return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
// Evaluate RHS.
StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
EvalResult RHSResult;
std::tie(RHSResult, RemainingExpr) =
evalComplexExpr(evalSimpleExpr(RHSExpr, OutsideLoad), OutsideLoad);
if (RHSResult.hasError())
return handleError(Expr, RHSResult);
if (RemainingExpr != "")
return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
if (LHSResult.getValue() != RHSResult.getValue()) {
Checker.ErrStream << "Expression '" << Expr << "' is false: "
<< format("0x%" PRIx64, LHSResult.getValue())
<< " != " << format("0x%" PRIx64, RHSResult.getValue())
<< "\n";
return false;
}
return true;
}
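  // Illustrative check expression accepted by evaluate() above (hypothetical
  // symbol 'foo'; the decode_operand form is parsed further below):
  //   decode_operand(foo, 0) = 0x4
  // Both sides are evaluated and a formatted error is reported if they
  // differ.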
private:
// RuntimeDyldCheckerExprEval requires some context when parsing exprs. In
// particular, it needs to know whether a symbol is being evaluated in the
// context of a load, in which case we want the linker's local address for
// the symbol, or outside of a load, in which case we want the symbol's
// address in the remote target.
struct ParseContext {
bool IsInsideLoad;
ParseContext(bool IsInsideLoad) : IsInsideLoad(IsInsideLoad) {}
};
const RuntimeDyldCheckerImpl &Checker;
enum class BinOpToken : unsigned {
Invalid,
Add,
Sub,
BitwiseAnd,
BitwiseOr,
ShiftLeft,
ShiftRight
};
class EvalResult {
public:
EvalResult() : Value(0), ErrorMsg("") {}
EvalResult(uint64_t Value) : Value(Value), ErrorMsg("") {}
EvalResult(std::string ErrorMsg) : Value(0), ErrorMsg(ErrorMsg) {}
uint64_t getValue() const { return Value; }
bool hasError() const { return ErrorMsg != ""; }
const std::string &getErrorMsg() const { return ErrorMsg; }
private:
uint64_t Value;
std::string ErrorMsg;
};
StringRef getTokenForError(StringRef Expr) const {
if (Expr.empty())
return "";
StringRef Token, Remaining;
if (isalpha(Expr[0]))
std::tie(Token, Remaining) = parseSymbol(Expr);
else if (isdigit(Expr[0]))
std::tie(Token, Remaining) = parseNumberString(Expr);
else {
unsigned TokLen = 1;
if (Expr.startswith("<<") || Expr.startswith(">>"))
TokLen = 2;
Token = Expr.substr(0, TokLen);
}
return Token;
}
EvalResult unexpectedToken(StringRef TokenStart, StringRef SubExpr,
StringRef ErrText) const {
std::string ErrorMsg("Encountered unexpected token '");
ErrorMsg += getTokenForError(TokenStart);
if (SubExpr != "") {
ErrorMsg += "' while parsing subexpression '";
ErrorMsg += SubExpr;
}
ErrorMsg += "'";
if (ErrText != "") {
ErrorMsg += " ";
ErrorMsg += ErrText;
}
return EvalResult(std::move(ErrorMsg));
}
bool handleError(StringRef Expr, const EvalResult &R) const {
assert(R.hasError() && "Not an error result.");
Checker.ErrStream << "Error evaluating expression '" << Expr
<< "': " << R.getErrorMsg() << "\n";
return false;
}
std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
if (Expr.empty())
return std::make_pair(BinOpToken::Invalid, "");
// Handle the two 2-character tokens.
if (Expr.startswith("<<"))
return std::make_pair(BinOpToken::ShiftLeft, Expr.substr(2).ltrim());
if (Expr.startswith(">>"))
return std::make_pair(BinOpToken::ShiftRight, Expr.substr(2).ltrim());
// Handle one-character tokens.
BinOpToken Op;
switch (Expr[0]) {
default:
return std::make_pair(BinOpToken::Invalid, Expr);
case '+':
Op = BinOpToken::Add;
break;
case '-':
Op = BinOpToken::Sub;
break;
case '&':
Op = BinOpToken::BitwiseAnd;
break;
case '|':
Op = BinOpToken::BitwiseOr;
break;
}
return std::make_pair(Op, Expr.substr(1).ltrim());
}
EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
const EvalResult &RHSResult) const {
switch (Op) {
default:
llvm_unreachable("Tried to evaluate unrecognized operation.");
case BinOpToken::Add:
return EvalResult(LHSResult.getValue() + RHSResult.getValue());
case BinOpToken::Sub:
return EvalResult(LHSResult.getValue() - RHSResult.getValue());
case BinOpToken::BitwiseAnd:
return EvalResult(LHSResult.getValue() & RHSResult.getValue());
case BinOpToken::BitwiseOr:
return EvalResult(LHSResult.getValue() | RHSResult.getValue());
case BinOpToken::ShiftLeft:
return EvalResult(LHSResult.getValue() << RHSResult.getValue());
case BinOpToken::ShiftRight:
return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
}
}
// Parse a symbol and return a (string, string) pair representing the symbol
// name and expression remaining to be parsed.
std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
size_t FirstNonSymbol = Expr.find_first_not_of("0123456789"
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
":_.$");
return std::make_pair(Expr.substr(0, FirstNonSymbol),
Expr.substr(FirstNonSymbol).ltrim());
}
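  // For example (illustrative): parseSymbol("foo+4") yields the pair
  // ("foo", "+4"), since '+' is not in the accepted symbol character set.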
// Evaluate a call to decode_operand. Decode the instruction operand at the
// given symbol and get the value of the requested operand.
// Returns an error if the instruction cannot be decoded, or the requested
// operand is not an immediate.
  // On success, returns a pair containing the value of the operand, plus
// the expression remaining to be evaluated.
std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
if (!Expr.startswith("("))
return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
StringRef RemainingExpr = Expr.substr(1).ltrim();
StringRef Symbol;
std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
if (!Checker.isSymbolValid(Symbol))
return std::make_pair(
EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
"");
if (!RemainingExpr.startswith(","))
return std::make_pair(
unexpectedToken(RemainingExpr, RemainingExpr, "expected ','"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
EvalResult OpIdxExpr;
std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
if (OpIdxExpr.hasError())
return std::make_pair(OpIdxExpr, "");
if (!RemainingExpr.startswith(")"))
return std::make_pair(
unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
MCInst Inst;
uint64_t Size;
if (!decodeInst(Symbol, Inst, Size))
return std::make_pair(
EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
"");
unsigned OpIdx = OpIdxExpr.getValue();
if (OpIdx >= Inst.getNumOperands()) {
std::string ErrMsg;
raw_string_ostream ErrMsgStream(ErrMsg);
ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
<< "' for instruction '" << Symbol
<< "'. Instruction has only "
<< format("%i", Inst.getNumOperands())
<< " operands.\nInstruction is:\n ";
Inst.dump_pretty(ErrMsgStream, Checker.InstPrinter);
return std::make_pair(EvalResult(ErrMsgStream.str()), "");
}
const MCOperand &Op = Inst.getOperand(OpIdx);
if (!Op.isImm()) {
std::string ErrMsg;
raw_string_ostream ErrMsgStream(ErrMsg);
ErrMsgStream << "Operand '" << format("%i", OpIdx) << "' of instruction '"
<< Symbol << "' is not an immediate.\nInstruction is:\n ";
Inst.dump_pretty(ErrMsgStream, Checker.InstPrinter);
return std::make_pair(EvalResult(ErrMsgStream.str()), "");
}
return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
}
// Evaluate a call to next_pc.
// Decode the instruction at the given symbol and return the following program
// counter.
// Returns an error if the instruction cannot be decoded.
  // On success, returns a pair containing the next PC, plus the expression
  // remaining to be evaluated.
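  // Example (with a hypothetical symbol 'foo'): "next_pc(foo)" evaluates to
  // the address of 'foo' plus the size of the instruction found there.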
std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr,
ParseContext PCtx) const {
if (!Expr.startswith("("))
return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
StringRef RemainingExpr = Expr.substr(1).ltrim();
StringRef Symbol;
std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
if (!Checker.isSymbolValid(Symbol))
return std::make_pair(
EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
"");
if (!RemainingExpr.startswith(")"))
return std::make_pair(
unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
MCInst Inst;
uint64_t InstSize;
if (!decodeInst(Symbol, Inst, InstSize))
return std::make_pair(
EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
"");
uint64_t SymbolAddr = PCtx.IsInsideLoad
? Checker.getSymbolLocalAddr(Symbol)
: Checker.getSymbolRemoteAddr(Symbol);
uint64_t NextPC = SymbolAddr + InstSize;
return std::make_pair(EvalResult(NextPC), RemainingExpr);
}
// Evaluate a call to stub_addr.
// Look up and return the address of the stub for the given
// (<file name>, <section name>, <symbol name>) tuple.
// On success, returns a pair containing the stub address, plus the expression
// remaining to be evaluated.
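  // Example (illustrative names): "stub_addr(test.o, .text, foo)" yields the
  // address of the stub generated for 'foo' in test.o's .text section.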
std::pair<EvalResult, StringRef> evalStubAddr(StringRef Expr,
ParseContext PCtx) const {
if (!Expr.startswith("("))
return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
StringRef RemainingExpr = Expr.substr(1).ltrim();
// Handle file-name specially, as it may contain characters that aren't
// legal for symbols.
StringRef FileName;
    size_t CommaIdx = RemainingExpr.find(',');
    FileName = RemainingExpr.substr(0, CommaIdx).rtrim();
    RemainingExpr = RemainingExpr.substr(CommaIdx).ltrim();
if (!RemainingExpr.startswith(","))
return std::make_pair(
unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
StringRef SectionName;
std::tie(SectionName, RemainingExpr) = parseSymbol(RemainingExpr);
if (!RemainingExpr.startswith(","))
return std::make_pair(
unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
StringRef Symbol;
std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
if (!RemainingExpr.startswith(")"))
return std::make_pair(
unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
uint64_t StubAddr;
std::string ErrorMsg = "";
std::tie(StubAddr, ErrorMsg) = Checker.getStubAddrFor(
FileName, SectionName, Symbol, PCtx.IsInsideLoad);
if (ErrorMsg != "")
return std::make_pair(EvalResult(ErrorMsg), "");
return std::make_pair(EvalResult(StubAddr), RemainingExpr);
}
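  // Evaluate a call to section_addr.
  // Look up and return the address of the given (<file name>, <section name>)
  // pair. Example (illustrative names): "section_addr(test.o, .text)".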
std::pair<EvalResult, StringRef> evalSectionAddr(StringRef Expr,
ParseContext PCtx) const {
if (!Expr.startswith("("))
return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
StringRef RemainingExpr = Expr.substr(1).ltrim();
// Handle file-name specially, as it may contain characters that aren't
// legal for symbols.
StringRef FileName;
    size_t CommaIdx = RemainingExpr.find(',');
    FileName = RemainingExpr.substr(0, CommaIdx).rtrim();
    RemainingExpr = RemainingExpr.substr(CommaIdx).ltrim();
if (!RemainingExpr.startswith(","))
return std::make_pair(
unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
StringRef SectionName;
std::tie(SectionName, RemainingExpr) = parseSymbol(RemainingExpr);
if (!RemainingExpr.startswith(")"))
return std::make_pair(
unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
uint64_t StubAddr;
std::string ErrorMsg = "";
std::tie(StubAddr, ErrorMsg) = Checker.getSectionAddr(
FileName, SectionName, PCtx.IsInsideLoad);
if (ErrorMsg != "")
return std::make_pair(EvalResult(ErrorMsg), "");
return std::make_pair(EvalResult(StubAddr), RemainingExpr);
}
  // Evaluate an identifier expr, which may be a symbol, or a call to
  // one of the builtin functions: decode_operand, next_pc, stub_addr, or
  // section_addr.
// Return the result, plus the expression remaining to be parsed.
std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr,
ParseContext PCtx) const {
StringRef Symbol;
StringRef RemainingExpr;
std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);
// Check for builtin function calls.
if (Symbol == "decode_operand")
return evalDecodeOperand(RemainingExpr);
else if (Symbol == "next_pc")
return evalNextPC(RemainingExpr, PCtx);
else if (Symbol == "stub_addr")
return evalStubAddr(RemainingExpr, PCtx);
else if (Symbol == "section_addr")
return evalSectionAddr(RemainingExpr, PCtx);
if (!Checker.isSymbolValid(Symbol)) {
std::string ErrMsg("No known address for symbol '");
ErrMsg += Symbol;
ErrMsg += "'";
if (Symbol.startswith("L"))
ErrMsg += " (this appears to be an assembler local label - "
" perhaps drop the 'L'?)";
return std::make_pair(EvalResult(ErrMsg), "");
}
// The value for the symbol depends on the context we're evaluating in:
// Inside a load this is the address in the linker's memory, outside a
// load it's the address in the target processes memory.
uint64_t Value = PCtx.IsInsideLoad ? Checker.getSymbolLocalAddr(Symbol)
: Checker.getSymbolRemoteAddr(Symbol);
// Looks like a plain symbol reference.
return std::make_pair(EvalResult(Value), RemainingExpr);
}
// Parse a number (hexadecimal or decimal) and return a (string, string)
// pair representing the number and the expression remaining to be parsed.
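  // Example (illustrative): parseNumberString("0x1f)") returns ("0x1f", ")").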
std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
size_t FirstNonDigit = StringRef::npos;
if (Expr.startswith("0x")) {
FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
if (FirstNonDigit == StringRef::npos)
FirstNonDigit = Expr.size();
} else {
FirstNonDigit = Expr.find_first_not_of("0123456789");
if (FirstNonDigit == StringRef::npos)
FirstNonDigit = Expr.size();
}
return std::make_pair(Expr.substr(0, FirstNonDigit),
Expr.substr(FirstNonDigit));
}
  // Evaluate a constant numeric expression (hexadecimal or decimal) and
// return a pair containing the result, and the expression remaining to be
// evaluated.
std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
StringRef ValueStr;
StringRef RemainingExpr;
std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
if (ValueStr.empty() || !isdigit(ValueStr[0]))
return std::make_pair(
unexpectedToken(RemainingExpr, RemainingExpr, "expected number"), "");
uint64_t Value;
ValueStr.getAsInteger(0, Value);
return std::make_pair(EvalResult(Value), RemainingExpr);
}
// Evaluate an expression of the form "(<expr>)" and return a pair
// containing the result of evaluating <expr>, plus the expression
// remaining to be parsed.
std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr,
ParseContext PCtx) const {
assert(Expr.startswith("(") && "Not a parenthesized expression");
EvalResult SubExprResult;
StringRef RemainingExpr;
std::tie(SubExprResult, RemainingExpr) =
evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim(), PCtx), PCtx);
if (SubExprResult.hasError())
return std::make_pair(SubExprResult, "");
if (!RemainingExpr.startswith(")"))
return std::make_pair(
unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
return std::make_pair(SubExprResult, RemainingExpr);
}
// Evaluate an expression in one of the following forms:
// *{<number>}<expr>
// Return a pair containing the result, plus the expression remaining to be
// parsed.
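  // Example (with a hypothetical symbol 'foo'): "*{4}foo" reads four bytes of
  // the linker's local memory at the address assigned to 'foo'.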
std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
assert(Expr.startswith("*") && "Not a load expression");
StringRef RemainingExpr = Expr.substr(1).ltrim();
// Parse read size.
if (!RemainingExpr.startswith("{"))
return std::make_pair(EvalResult("Expected '{' following '*'."), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
EvalResult ReadSizeExpr;
std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
if (ReadSizeExpr.hasError())
return std::make_pair(ReadSizeExpr, RemainingExpr);
uint64_t ReadSize = ReadSizeExpr.getValue();
if (ReadSize < 1 || ReadSize > 8)
return std::make_pair(EvalResult("Invalid size for dereference."), "");
if (!RemainingExpr.startswith("}"))
return std::make_pair(EvalResult("Missing '}' for dereference."), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
// Evaluate the expression representing the load address.
ParseContext LoadCtx(true);
EvalResult LoadAddrExprResult;
std::tie(LoadAddrExprResult, RemainingExpr) =
evalComplexExpr(evalSimpleExpr(RemainingExpr, LoadCtx), LoadCtx);
if (LoadAddrExprResult.hasError())
return std::make_pair(LoadAddrExprResult, "");
uint64_t LoadAddr = LoadAddrExprResult.getValue();
return std::make_pair(
EvalResult(Checker.readMemoryAtAddr(LoadAddr, ReadSize)),
RemainingExpr);
}
// Evaluate a "simple" expression. This is any expression that _isn't_ an
// un-parenthesized binary expression.
//
// "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
//
// Returns a pair containing the result of the evaluation, plus the
// expression remaining to be parsed.
std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr,
ParseContext PCtx) const {
EvalResult SubExprResult;
StringRef RemainingExpr;
if (Expr.empty())
return std::make_pair(EvalResult("Unexpected end of expression"), "");
if (Expr[0] == '(')
std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr, PCtx);
else if (Expr[0] == '*')
std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
else if (isalpha(Expr[0]) || Expr[0] == '_')
std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr, PCtx);
else if (isdigit(Expr[0]))
std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
else
return std::make_pair(
unexpectedToken(Expr, Expr,
"expected '(', '*', identifier, or number"), "");
if (SubExprResult.hasError())
return std::make_pair(SubExprResult, RemainingExpr);
// Evaluate bit-slice if present.
if (RemainingExpr.startswith("["))
std::tie(SubExprResult, RemainingExpr) =
evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));
return std::make_pair(SubExprResult, RemainingExpr);
}
// Evaluate a bit-slice of an expression.
// A bit-slice has the form "<expr>[high:low]". The result of evaluating a
// slice is the bits between high and low (inclusive) in the original
// expression, right shifted so that the "low" bit is in position 0 in the
// result.
// Returns a pair containing the result of the slice operation, plus the
// expression remaining to be parsed.
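  // Example (illustrative): "0xff00[15:8]" evaluates to 0xff.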
std::pair<EvalResult, StringRef>
evalSliceExpr(std::pair<EvalResult, StringRef> Ctx) const {
EvalResult SubExprResult;
StringRef RemainingExpr;
std::tie(SubExprResult, RemainingExpr) = Ctx;
assert(RemainingExpr.startswith("[") && "Not a slice expr.");
RemainingExpr = RemainingExpr.substr(1).ltrim();
EvalResult HighBitExpr;
std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
if (HighBitExpr.hasError())
return std::make_pair(HighBitExpr, RemainingExpr);
if (!RemainingExpr.startswith(":"))
return std::make_pair(
unexpectedToken(RemainingExpr, RemainingExpr, "expected ':'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
EvalResult LowBitExpr;
std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
if (LowBitExpr.hasError())
return std::make_pair(LowBitExpr, RemainingExpr);
if (!RemainingExpr.startswith("]"))
return std::make_pair(
unexpectedToken(RemainingExpr, RemainingExpr, "expected ']'"), "");
RemainingExpr = RemainingExpr.substr(1).ltrim();
unsigned HighBit = HighBitExpr.getValue();
unsigned LowBit = LowBitExpr.getValue();
uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
}
// Evaluate a "complex" expression.
// Takes an already evaluated subexpression and checks for the presence of a
// binary operator, computing the result of the binary operation if one is
// found. Used to make arithmetic expressions left-associative.
// Returns a pair containing the ultimate result of evaluating the
// expression, plus the expression remaining to be evaluated.
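  // All operators share a single precedence level, so, for example, the
  // expression "8 - 4 - 2" is evaluated as "(8 - 4) - 2", i.e. 2.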
std::pair<EvalResult, StringRef>
evalComplexExpr(std::pair<EvalResult, StringRef> LHSAndRemaining,
ParseContext PCtx) const {
EvalResult LHSResult;
StringRef RemainingExpr;
std::tie(LHSResult, RemainingExpr) = LHSAndRemaining;
// If there was an error, or there's nothing left to evaluate, return the
// result.
if (LHSResult.hasError() || RemainingExpr == "")
return std::make_pair(LHSResult, RemainingExpr);
    // Otherwise check if this is a binary expression.
BinOpToken BinOp;
std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
// If this isn't a recognized expression just return.
if (BinOp == BinOpToken::Invalid)
return std::make_pair(LHSResult, RemainingExpr);
// This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
EvalResult RHSResult;
std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr, PCtx);
// If there was an error evaluating the RHS, return it.
if (RHSResult.hasError())
return std::make_pair(RHSResult, RemainingExpr);
// This is a binary expression - evaluate and try to continue as a
// complex expr.
EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));
return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr), PCtx);
}
bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size) const {
MCDisassembler *Dis = Checker.Disassembler;
StringRef SectionMem = Checker.getSubsectionStartingAt(Symbol);
ArrayRef<uint8_t> SectionBytes(
reinterpret_cast<const uint8_t *>(SectionMem.data()),
SectionMem.size());
MCDisassembler::DecodeStatus S =
Dis->getInstruction(Inst, Size, SectionBytes, 0, nulls(), nulls());
return (S == MCDisassembler::Success);
}
};
}
RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(RuntimeDyld &RTDyld,
MCDisassembler *Disassembler,
MCInstPrinter *InstPrinter,
raw_ostream &ErrStream)
: RTDyld(RTDyld), Disassembler(Disassembler), InstPrinter(InstPrinter),
ErrStream(ErrStream) {
RTDyld.Checker = this;
}
bool RuntimeDyldCheckerImpl::check(StringRef CheckExpr) const {
CheckExpr = CheckExpr.trim();
DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr << "'...\n");
RuntimeDyldCheckerExprEval P(*this, ErrStream);
bool Result = P.evaluate(CheckExpr);
(void)Result;
DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
<< (Result ? "passed" : "FAILED") << ".\n");
return Result;
}
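// Scan the given buffer for lines starting with RulePrefix and evaluate each
// such line as a check expression. Example (illustrative prefix): with
// RulePrefix "# rtdyld-check:", the line "# rtdyld-check: *{8}foo = 1" would
// be checked.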
bool RuntimeDyldCheckerImpl::checkAllRulesInBuffer(StringRef RulePrefix,
MemoryBuffer *MemBuf) const {
bool DidAllTestsPass = true;
unsigned NumRules = 0;
const char *LineStart = MemBuf->getBufferStart();
// Eat whitespace.
while (LineStart != MemBuf->getBufferEnd() && std::isspace(*LineStart))
++LineStart;
while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
const char *LineEnd = LineStart;
while (LineEnd != MemBuf->getBufferEnd() && *LineEnd != '\r' &&
*LineEnd != '\n')
++LineEnd;
StringRef Line(LineStart, LineEnd - LineStart);
if (Line.startswith(RulePrefix)) {
DidAllTestsPass &= check(Line.substr(RulePrefix.size()));
++NumRules;
}
// Eat whitespace.
LineStart = LineEnd;
while (LineStart != MemBuf->getBufferEnd() && std::isspace(*LineStart))
++LineStart;
}
return DidAllTestsPass && (NumRules != 0);
}
bool RuntimeDyldCheckerImpl::isSymbolValid(StringRef Symbol) const {
if (getRTDyld().getSymbolLocalAddress(Symbol))
return true;
return !!getRTDyld().Resolver.findSymbol(Symbol);
}
uint64_t RuntimeDyldCheckerImpl::getSymbolLocalAddr(StringRef Symbol) const {
return static_cast<uint64_t>(
reinterpret_cast<uintptr_t>(getRTDyld().getSymbolLocalAddress(Symbol)));
}
uint64_t RuntimeDyldCheckerImpl::getSymbolRemoteAddr(StringRef Symbol) const {
if (auto InternalSymbol = getRTDyld().getSymbol(Symbol))
return InternalSymbol.getAddress();
return getRTDyld().Resolver.findSymbol(Symbol).getAddress();
}
uint64_t RuntimeDyldCheckerImpl::readMemoryAtAddr(uint64_t SrcAddr,
unsigned Size) const {
uintptr_t PtrSizedAddr = static_cast<uintptr_t>(SrcAddr);
assert(PtrSizedAddr == SrcAddr && "Linker memory pointer out-of-range.");
uint8_t *Src = reinterpret_cast<uint8_t*>(PtrSizedAddr);
return getRTDyld().readBytesUnaligned(Src, Size);
}
std::pair<const RuntimeDyldCheckerImpl::SectionAddressInfo*, std::string>
RuntimeDyldCheckerImpl::findSectionAddrInfo(StringRef FileName,
StringRef SectionName) const {
auto SectionMapItr = Stubs.find(FileName);
if (SectionMapItr == Stubs.end()) {
std::string ErrorMsg = "File '";
ErrorMsg += FileName;
ErrorMsg += "' not found. ";
if (Stubs.empty())
ErrorMsg += "No stubs registered.";
else {
ErrorMsg += "Available files are:";
for (const auto& StubEntry : Stubs) {
ErrorMsg += " '";
ErrorMsg += StubEntry.first;
ErrorMsg += "'";
}
}
ErrorMsg += "\n";
return std::make_pair(nullptr, ErrorMsg);
}
auto SectionInfoItr = SectionMapItr->second.find(SectionName);
if (SectionInfoItr == SectionMapItr->second.end())
return std::make_pair(nullptr,
("Section '" + SectionName + "' not found in file '" +
FileName + "'\n").str());
return std::make_pair(&SectionInfoItr->second, std::string(""));
}
std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getSectionAddr(
StringRef FileName, StringRef SectionName, bool IsInsideLoad) const {
const SectionAddressInfo *SectionInfo = nullptr;
{
std::string ErrorMsg;
std::tie(SectionInfo, ErrorMsg) =
findSectionAddrInfo(FileName, SectionName);
if (ErrorMsg != "")
return std::make_pair(0, ErrorMsg);
}
unsigned SectionID = SectionInfo->SectionID;
uint64_t Addr;
if (IsInsideLoad)
Addr =
static_cast<uint64_t>(
reinterpret_cast<uintptr_t>(getRTDyld().Sections[SectionID].Address));
else
Addr = getRTDyld().Sections[SectionID].LoadAddress;
return std::make_pair(Addr, std::string(""));
}
std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getStubAddrFor(
StringRef FileName, StringRef SectionName, StringRef SymbolName,
bool IsInsideLoad) const {
const SectionAddressInfo *SectionInfo = nullptr;
{
std::string ErrorMsg;
std::tie(SectionInfo, ErrorMsg) =
findSectionAddrInfo(FileName, SectionName);
if (ErrorMsg != "")
return std::make_pair(0, ErrorMsg);
}
unsigned SectionID = SectionInfo->SectionID;
const StubOffsetsMap &SymbolStubs = SectionInfo->StubOffsets;
auto StubOffsetItr = SymbolStubs.find(SymbolName);
if (StubOffsetItr == SymbolStubs.end())
return std::make_pair(0,
("Stub for symbol '" + SymbolName + "' not found. "
"If '" + SymbolName + "' is an internal symbol this "
"may indicate that the stub target offset is being "
"computed incorrectly.\n").str());
uint64_t StubOffset = StubOffsetItr->second;
uint64_t Addr;
if (IsInsideLoad) {
uintptr_t SectionBase =
reinterpret_cast<uintptr_t>(getRTDyld().Sections[SectionID].Address);
Addr = static_cast<uint64_t>(SectionBase) + StubOffset;
} else {
uint64_t SectionBase = getRTDyld().Sections[SectionID].LoadAddress;
Addr = SectionBase + StubOffset;
}
return std::make_pair(Addr, std::string(""));
}
StringRef
RuntimeDyldCheckerImpl::getSubsectionStartingAt(StringRef Name) const {
RTDyldSymbolTable::const_iterator pos =
getRTDyld().GlobalSymbolTable.find(Name);
if (pos == getRTDyld().GlobalSymbolTable.end())
return StringRef();
const auto &SymInfo = pos->second;
uint8_t *SectionAddr = getRTDyld().getSectionAddress(SymInfo.getSectionID());
return StringRef(reinterpret_cast<const char *>(SectionAddr) +
SymInfo.getOffset(),
getRTDyld().Sections[SymInfo.getSectionID()].Size -
SymInfo.getOffset());
}
void RuntimeDyldCheckerImpl::registerSection(
StringRef FilePath, unsigned SectionID) {
StringRef FileName = sys::path::filename(FilePath);
const SectionEntry &Section = getRTDyld().Sections[SectionID];
StringRef SectionName = Section.Name;
Stubs[FileName][SectionName].SectionID = SectionID;
}
void RuntimeDyldCheckerImpl::registerStubMap(
StringRef FilePath, unsigned SectionID,
const RuntimeDyldImpl::StubMap &RTDyldStubs) {
StringRef FileName = sys::path::filename(FilePath);
const SectionEntry &Section = getRTDyld().Sections[SectionID];
StringRef SectionName = Section.Name;
Stubs[FileName][SectionName].SectionID = SectionID;
for (auto &StubMapEntry : RTDyldStubs) {
std::string SymbolName = "";
if (StubMapEntry.first.SymbolName)
SymbolName = StubMapEntry.first.SymbolName;
else {
// If this is a (Section, Offset) pair, do a reverse lookup in the
// global symbol table to find the name.
for (auto &GSTEntry : getRTDyld().GlobalSymbolTable) {
const auto &SymInfo = GSTEntry.second;
if (SymInfo.getSectionID() == StubMapEntry.first.SectionID &&
SymInfo.getOffset() ==
static_cast<uint64_t>(StubMapEntry.first.Offset)) {
SymbolName = GSTEntry.first();
break;
}
}
}
if (SymbolName != "")
Stubs[FileName][SectionName].StubOffsets[SymbolName] =
StubMapEntry.second;
}
}
RuntimeDyldChecker::RuntimeDyldChecker(RuntimeDyld &RTDyld,
MCDisassembler *Disassembler,
MCInstPrinter *InstPrinter,
raw_ostream &ErrStream)
: Impl(make_unique<RuntimeDyldCheckerImpl>(RTDyld, Disassembler,
InstPrinter, ErrStream)) {}
RuntimeDyldChecker::~RuntimeDyldChecker() {}
RuntimeDyld& RuntimeDyldChecker::getRTDyld() {
return Impl->RTDyld;
}
const RuntimeDyld& RuntimeDyldChecker::getRTDyld() const {
return Impl->RTDyld;
}
bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
return Impl->check(CheckExpr);
}
bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
MemoryBuffer *MemBuf) const {
return Impl->checkAllRulesInBuffer(RulePrefix, MemBuf);
}
std::pair<uint64_t, std::string>
RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
bool LocalAddress) {
return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
}
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h | //===-- RuntimeDyldCOFF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// COFF support for MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_RUNTIME_DYLD_COFF_H
#define LLVM_RUNTIME_DYLD_COFF_H
#include "RuntimeDyldImpl.h"
#include "llvm/ADT/DenseMap.h"
#define DEBUG_TYPE "dyld"
using namespace llvm;
namespace llvm {
// Common base class for COFF dynamic linker support.
// Concrete subclasses for each target can be found in ./Targets.
class RuntimeDyldCOFF : public RuntimeDyldImpl {
public:
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
loadObject(const object::ObjectFile &Obj) override;
bool isCompatibleFile(const object::ObjectFile &Obj) const override;
static std::unique_ptr<RuntimeDyldCOFF>
create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver);
protected:
RuntimeDyldCOFF(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldImpl(MemMgr, Resolver) {}
uint64_t getSymbolOffset(const SymbolRef &Sym);
};
} // end namespace llvm
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h | //===-- RuntimeDyldMachO.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachO support for MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
#include "RuntimeDyldImpl.h"
#include "llvm/Object/MachO.h"
#include "llvm/Support/Format.h"
#define DEBUG_TYPE "dyld"
using namespace llvm;
using namespace llvm::object;
namespace llvm {
class RuntimeDyldMachO : public RuntimeDyldImpl {
protected:
struct SectionOffsetPair {
unsigned SectionID;
uint64_t Offset;
};
struct EHFrameRelatedSections {
EHFrameRelatedSections()
: EHFrameSID(RTDYLD_INVALID_SECTION_ID),
TextSID(RTDYLD_INVALID_SECTION_ID),
ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
EHFrameRelatedSections(SID EH, SID T, SID Ex)
: EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
SID EHFrameSID;
SID TextSID;
SID ExceptTabSID;
};
// When a module is loaded we save the SectionID of the EH frame section
// in a table until we receive a request to register all unregistered
// EH frame sections with the memory manager.
SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
RuntimeDyldMachO(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldImpl(MemMgr, Resolver) {}
/// This convenience method uses memcpy to extract a contiguous addend (the
/// addend size and offset are taken from the corresponding fields of the RE).
int64_t memcpyAddend(const RelocationEntry &RE) const;
/// Given a relocation_iterator for a non-scattered relocation, construct a
/// RelocationEntry and fill in the common fields. The 'Addend' field is *not*
/// filled in, since immediate encodings are highly target/opcode specific.
/// For targets/opcodes with simple, contiguous immediates (e.g. X86) the
/// memcpyAddend method can be used to read the immediate.
RelocationEntry getRelocationEntry(unsigned SectionID,
const ObjectFile &BaseTObj,
const relocation_iterator &RI) const {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile &>(BaseTObj);
MachO::any_relocation_info RelInfo =
Obj.getRelocation(RI->getRawDataRefImpl());
bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
unsigned Size = Obj.getAnyRelocationLength(RelInfo);
uint64_t Offset = RI->getOffset();
MachO::RelocationInfoType RelType =
static_cast<MachO::RelocationInfoType>(Obj.getAnyRelocationType(RelInfo));
return RelocationEntry(SectionID, Offset, RelType, 0, IsPCRel, Size);
}
/// Construct a RelocationValueRef representing the relocation target.
/// For Symbols in known sections, this will return a RelocationValueRef
/// representing a (SectionID, Offset) pair.
/// For Symbols whose section is not known, this will return a
/// (SymbolName, Offset) pair, where the Offset is taken from the instruction
/// immediate (held in RE.Addend).
/// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
/// should be done by the caller where appropriate by calling makePCRel on
/// the RelocationValueRef.
RelocationValueRef getRelocationValueRef(const ObjectFile &BaseTObj,
const relocation_iterator &RI,
const RelocationEntry &RE,
ObjSectionToIDMap &ObjSectionToID);
/// Make the RelocationValueRef addend PC-relative.
void makeValueAddendPCRel(RelocationValueRef &Value,
const relocation_iterator &RI,
unsigned OffsetToNextPC);
/// Dump information about the relocation entry (RE) and resolved value.
void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
// Return a section iterator for the section containing the given address.
static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
uint64_t Addr);
// Populate __pointers section.
void populateIndirectSymbolPointersSection(const MachOObjectFile &Obj,
const SectionRef &PTSection,
unsigned PTSectionID);
public:
/// Create a RuntimeDyldMachO instance for the given target architecture.
static std::unique_ptr<RuntimeDyldMachO>
create(Triple::ArchType Arch,
RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver);
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
loadObject(const object::ObjectFile &O) override;
SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
bool isCompatibleFile(const object::ObjectFile &Obj) const override;
};
/// RuntimeDyldMachOTarget - Templated base class for generic MachO linker
/// algorithms and data structures.
///
/// Concrete, target specific sub-classes can be accessed via the impl()
/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
/// Recurring Template Idiom). Concrete subclasses for each target
/// can be found in ./Targets.
template <typename Impl>
class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
private:
Impl &impl() { return static_cast<Impl &>(*this); }
const Impl &impl() const { return static_cast<const Impl &>(*this); }
unsigned char *processFDE(unsigned char *P, int64_t DeltaForText,
int64_t DeltaForEH);
public:
RuntimeDyldMachOCRTPBase(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldMachO(MemMgr, Resolver) {}
void finalizeLoad(const ObjectFile &Obj,
ObjSectionToIDMap &SectionMap) override;
void registerEHFrames() override;
};
} // end namespace llvm
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp | //===-- RuntimeDyldCOFF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of COFF support for the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#include "RuntimeDyldCOFF.h"
#include "Targets/RuntimeDyldCOFFX86_64.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/ObjectFile.h"
using namespace llvm;
using namespace llvm::object;
#define DEBUG_TYPE "dyld"
namespace {
class LoadedCOFFObjectInfo
: public RuntimeDyld::LoadedObjectInfoHelper<LoadedCOFFObjectInfo> {
public:
LoadedCOFFObjectInfo(RuntimeDyldImpl &RTDyld, unsigned BeginIdx,
unsigned EndIdx)
: LoadedObjectInfoHelper(RTDyld, BeginIdx, EndIdx) {}
OwningBinary<ObjectFile>
getObjectForDebug(const ObjectFile &Obj) const override {
return OwningBinary<ObjectFile>();
}
};
}
namespace llvm {
std::unique_ptr<RuntimeDyldCOFF>
llvm::RuntimeDyldCOFF::create(Triple::ArchType Arch,
RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver) {
switch (Arch) {
default:
llvm_unreachable("Unsupported target for RuntimeDyldCOFF.");
break;
case Triple::x86_64:
return make_unique<RuntimeDyldCOFFX86_64>(MemMgr, Resolver);
}
}
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyldCOFF::loadObject(const object::ObjectFile &O) {
unsigned SectionStartIdx, SectionEndIdx;
std::tie(SectionStartIdx, SectionEndIdx) = loadObjectImpl(O);
return llvm::make_unique<LoadedCOFFObjectInfo>(*this, SectionStartIdx,
SectionEndIdx);
}
uint64_t RuntimeDyldCOFF::getSymbolOffset(const SymbolRef &Sym) {
// The value in a relocatable COFF object is the offset.
return Sym.getValue();
}
bool RuntimeDyldCOFF::isCompatibleFile(const object::ObjectFile &Obj) const {
return Obj.isCOFF();
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp | //===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "RuntimeDyldCheckerImpl.h"
#include "RuntimeDyldCOFF.h"
#include "RuntimeDyldELF.h"
#include "RuntimeDyldImpl.h"
#include "RuntimeDyldMachO.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MutexGuard.h"
using namespace llvm;
using namespace llvm::object;
#define DEBUG_TYPE "dyld"
// Empty out-of-line virtual destructor as the key function.
RuntimeDyldImpl::~RuntimeDyldImpl() {}
// Pin LoadedObjectInfo's vtables to this file.
void RuntimeDyld::LoadedObjectInfo::anchor() {}
namespace llvm {
void RuntimeDyldImpl::registerEHFrames() {}
void RuntimeDyldImpl::deregisterEHFrames() {}
#ifndef NDEBUG
static void dumpSectionMemory(const SectionEntry &S, StringRef State) {
dbgs() << "----- Contents of section " << S.Name << " " << State << " -----";
if (S.Address == nullptr) {
dbgs() << "\n <section not emitted>\n";
return;
}
const unsigned ColsPerRow = 16;
uint8_t *DataAddr = S.Address;
uint64_t LoadAddr = S.LoadAddress;
unsigned StartPadding = LoadAddr & (ColsPerRow - 1);
unsigned BytesRemaining = S.Size;
if (StartPadding) {
dbgs() << "\n" << format("0x%016" PRIx64,
LoadAddr & ~(uint64_t)(ColsPerRow - 1)) << ":";
while (StartPadding--)
dbgs() << " ";
}
while (BytesRemaining > 0) {
if ((LoadAddr & (ColsPerRow - 1)) == 0)
dbgs() << "\n" << format("0x%016" PRIx64, LoadAddr) << ":";
dbgs() << " " << format("%02x", *DataAddr);
++DataAddr;
++LoadAddr;
--BytesRemaining;
}
dbgs() << "\n";
}
#endif
// Resolve the relocations for all symbols we currently know about.
void RuntimeDyldImpl::resolveRelocations() {
MutexGuard locked(lock);
// First, resolve relocations associated with external symbols.
resolveExternalSymbols();
// Just iterate over the sections we have and resolve all the relocations
// in them. Gross overkill, but it gets the job done.
for (int i = 0, e = Sections.size(); i != e; ++i) {
// The Section here (Sections[i]) refers to the section in which the
// symbol for the relocation is located. The SectionID in the relocation
// entry provides the section to which the relocation will be applied.
uint64_t Addr = Sections[i].LoadAddress;
DEBUG(dbgs() << "Resolving relocations Section #" << i << "\t"
<< format("%p", (uintptr_t)Addr) << "\n");
DEBUG(dumpSectionMemory(Sections[i], "before relocations"));
resolveRelocationList(Relocations[i], Addr);
DEBUG(dumpSectionMemory(Sections[i], "after relocations"));
Relocations.erase(i);
}
}
void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) {
MutexGuard locked(lock);
for (unsigned i = 0, e = Sections.size(); i != e; ++i) {
if (Sections[i].Address == LocalAddress) {
reassignSectionAddress(i, TargetAddress);
return;
}
}
llvm_unreachable("Attempting to remap address of unknown section!");
}
static std::error_code getOffset(const SymbolRef &Sym, SectionRef Sec,
uint64_t &Result) {
ErrorOr<uint64_t> AddressOrErr = Sym.getAddress();
if (std::error_code EC = AddressOrErr.getError())
return EC;
Result = *AddressOrErr - Sec.getAddress();
return std::error_code();
}
std::pair<unsigned, unsigned>
RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
MutexGuard locked(lock);
// Grab the first Section ID. We'll use this later to construct the underlying
// range for the returned LoadedObjectInfo.
unsigned SectionsAddedBeginIdx = Sections.size();
// Save information about our target
Arch = (Triple::ArchType)Obj.getArch();
IsTargetLittleEndian = Obj.isLittleEndian();
setMipsABI(Obj);
// Compute the memory size required to load all sections to be loaded
// and pass this information to the memory manager
if (MemMgr.needsToReserveAllocationSpace()) {
uint64_t CodeSize = 0, DataSizeRO = 0, DataSizeRW = 0;
computeTotalAllocSize(Obj, CodeSize, DataSizeRO, DataSizeRW);
MemMgr.reserveAllocationSpace(CodeSize, DataSizeRO, DataSizeRW);
}
// Used sections from the object file
ObjSectionToIDMap LocalSections;
// Common symbols requiring allocation, with their sizes and alignments
CommonSymbolList CommonSymbols;
// Parse symbols
DEBUG(dbgs() << "Parse symbols:\n");
for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
++I) {
uint32_t Flags = I->getFlags();
bool IsCommon = Flags & SymbolRef::SF_Common;
if (IsCommon)
CommonSymbols.push_back(*I);
else {
object::SymbolRef::Type SymType = I->getType();
if (SymType == object::SymbolRef::ST_Function ||
SymType == object::SymbolRef::ST_Data ||
SymType == object::SymbolRef::ST_Unknown) {
ErrorOr<StringRef> NameOrErr = I->getName();
Check(NameOrErr.getError());
StringRef Name = *NameOrErr;
section_iterator SI = Obj.section_end();
Check(I->getSection(SI));
if (SI == Obj.section_end())
continue;
uint64_t SectOffset;
Check(getOffset(*I, *SI, SectOffset));
StringRef SectionData;
Check(SI->getContents(SectionData));
bool IsCode = SI->isText();
unsigned SectionID =
findOrEmitSection(Obj, *SI, IsCode, LocalSections);
DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
<< " SID: " << SectionID << " Offset: "
<< format("%p", (uintptr_t)SectOffset)
<< " flags: " << Flags << "\n");
JITSymbolFlags RTDyldSymFlags = JITSymbolFlags::None;
if (Flags & SymbolRef::SF_Weak)
RTDyldSymFlags |= JITSymbolFlags::Weak;
if (Flags & SymbolRef::SF_Exported)
RTDyldSymFlags |= JITSymbolFlags::Exported;
GlobalSymbolTable[Name] =
SymbolTableEntry(SectionID, SectOffset, RTDyldSymFlags);
}
}
}
// Allocate common symbols
emitCommonSymbols(Obj, CommonSymbols);
// Parse and process relocations
DEBUG(dbgs() << "Parse relocations:\n");
for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
SI != SE; ++SI) {
unsigned SectionID = 0;
StubMap Stubs;
section_iterator RelocatedSection = SI->getRelocatedSection();
if (RelocatedSection == SE)
continue;
relocation_iterator I = SI->relocation_begin();
relocation_iterator E = SI->relocation_end();
if (I == E && !ProcessAllSections)
continue;
bool IsCode = RelocatedSection->isText();
SectionID =
findOrEmitSection(Obj, *RelocatedSection, IsCode, LocalSections);
DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
for (; I != E;)
I = processRelocationRef(SectionID, I, Obj, LocalSections, Stubs);
// If there is an attached checker, notify it about the stubs for this
// section so that they can be verified.
if (Checker)
Checker->registerStubMap(Obj.getFileName(), SectionID, Stubs);
}
// Give the subclasses a chance to tie-up any loose ends.
finalizeLoad(Obj, LocalSections);
unsigned SectionsAddedEndIdx = Sections.size();
return std::make_pair(SectionsAddedBeginIdx, SectionsAddedEndIdx);
}
// A helper method for computeTotalAllocSize.
// Computes the memory size required to allocate sections with the given sizes,
// assuming that all sections are allocated with the given alignment
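// Example: a 10-byte section rounded up to an 8-byte alignment contributes
// (10 + 8 - 1) / 8 * 8 == 16 bytes to the total.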
static uint64_t
computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
uint64_t Alignment) {
uint64_t TotalSize = 0;
for (size_t Idx = 0, Cnt = SectionSizes.size(); Idx < Cnt; Idx++) {
uint64_t AlignedSize =
(SectionSizes[Idx] + Alignment - 1) / Alignment * Alignment;
TotalSize += AlignedSize;
}
return TotalSize;
}
static bool isRequiredForExecution(const SectionRef Section) {
const ObjectFile *Obj = Section.getObject();
if (isa<object::ELFObjectFileBase>(Obj))
return ELFSectionRef(Section).getFlags() & ELF::SHF_ALLOC;
if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj)) {
const coff_section *CoffSection = COFFObj->getCOFFSection(Section);
// Avoid loading zero-sized COFF sections.
// In PE files, VirtualSize gives the section size, and SizeOfRawData
// may be zero for sections with content. In Obj files, SizeOfRawData
// gives the section size, and VirtualSize is always zero. Hence
// the need to check for both cases below.
bool HasContent = (CoffSection->VirtualSize > 0)
|| (CoffSection->SizeOfRawData > 0);
bool IsDiscardable = CoffSection->Characteristics &
(COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
return HasContent && !IsDiscardable;
}
assert(isa<MachOObjectFile>(Obj));
return true;
}
static bool isReadOnlyData(const SectionRef Section) {
const ObjectFile *Obj = Section.getObject();
if (isa<object::ELFObjectFileBase>(Obj))
return !(ELFSectionRef(Section).getFlags() &
(ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
return ((COFFObj->getCOFFSection(Section)->Characteristics &
(COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
| COFF::IMAGE_SCN_MEM_READ
| COFF::IMAGE_SCN_MEM_WRITE))
==
(COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
| COFF::IMAGE_SCN_MEM_READ));
assert(isa<MachOObjectFile>(Obj));
return false;
}
static bool isZeroInit(const SectionRef Section) {
const ObjectFile *Obj = Section.getObject();
if (isa<object::ELFObjectFileBase>(Obj))
return ELFSectionRef(Section).getType() == ELF::SHT_NOBITS;
if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
return COFFObj->getCOFFSection(Section)->Characteristics &
COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
auto *MachO = cast<MachOObjectFile>(Obj);
unsigned SectionType = MachO->getSectionType(Section);
return SectionType == MachO::S_ZEROFILL ||
SectionType == MachO::S_GB_ZEROFILL;
}
// Compute an upper bound of the memory size that is required to load all
// sections
void RuntimeDyldImpl::computeTotalAllocSize(const ObjectFile &Obj,
uint64_t &CodeSize,
uint64_t &DataSizeRO,
uint64_t &DataSizeRW) {
// Compute the size of all sections required for execution
std::vector<uint64_t> CodeSectionSizes;
std::vector<uint64_t> ROSectionSizes;
std::vector<uint64_t> RWSectionSizes;
uint64_t MaxAlignment = sizeof(void *);
// Collect sizes of all sections to be loaded;
// also determine the max alignment of all sections
for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
SI != SE; ++SI) {
const SectionRef &Section = *SI;
bool IsRequired = isRequiredForExecution(Section);
// Consider only the sections that are required to be loaded for execution
if (IsRequired) {
StringRef Name;
uint64_t DataSize = Section.getSize();
uint64_t Alignment64 = Section.getAlignment();
bool IsCode = Section.isText();
bool IsReadOnly = isReadOnlyData(Section);
Check(Section.getName(Name));
unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
uint64_t SectionSize = DataSize + StubBufSize;
      // The .eh_frame section (at least on Linux) needs an extra four bytes
      // of zero padding at the end. For MachO objects, this section has a
      // slightly different name, so this won't have any effect for MachO
      // objects.
if (Name == ".eh_frame")
SectionSize += 4;
if (!SectionSize)
SectionSize = 1;
if (IsCode) {
CodeSectionSizes.push_back(SectionSize);
} else if (IsReadOnly) {
ROSectionSizes.push_back(SectionSize);
} else {
RWSectionSizes.push_back(SectionSize);
}
// update the max alignment
if (Alignment > MaxAlignment) {
MaxAlignment = Alignment;
}
}
}
// Compute the size of all common symbols
uint64_t CommonSize = 0;
for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
++I) {
uint32_t Flags = I->getFlags();
if (Flags & SymbolRef::SF_Common) {
// Add the common symbols to a list. We'll allocate them all below.
uint64_t Size = I->getCommonSize();
CommonSize += Size;
}
}
if (CommonSize != 0) {
RWSectionSizes.push_back(CommonSize);
}
// Compute the required allocation space for each different type of sections
// (code, read-only data, read-write data) assuming that all sections are
// allocated with the max alignment. Note that we cannot compute with the
// individual alignments of the sections, because then the required size
// depends on the order in which the sections are allocated.
CodeSize = computeAllocationSizeForSections(CodeSectionSizes, MaxAlignment);
DataSizeRO = computeAllocationSizeForSections(ROSectionSizes, MaxAlignment);
DataSizeRW = computeAllocationSizeForSections(RWSectionSizes, MaxAlignment);
}
// compute stub buffer size for the given section
unsigned RuntimeDyldImpl::computeSectionStubBufSize(const ObjectFile &Obj,
const SectionRef &Section) {
unsigned StubSize = getMaxStubSize();
if (StubSize == 0) {
return 0;
}
  // FIXME: this is an inefficient way to handle this. We should compute the
// necessary section allocation size in loadObject by walking all the sections
// once.
unsigned StubBufSize = 0;
for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
SI != SE; ++SI) {
section_iterator RelSecI = SI->getRelocatedSection();
if (!(RelSecI == Section))
continue;
for (const RelocationRef &Reloc : SI->relocations()) {
(void)Reloc;
StubBufSize += StubSize;
}
}
// Get section data size and alignment
uint64_t DataSize = Section.getSize();
uint64_t Alignment64 = Section.getAlignment();
// Add stubbuf size alignment
unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
unsigned StubAlignment = getStubAlignment();
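  // (x & -x) extracts the lowest set bit of x, i.e. the guaranteed alignment
  // of the end of the section data; pad if the stubs need stronger alignment.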
unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
if (StubAlignment > EndAlignment)
StubBufSize += StubAlignment - EndAlignment;
return StubBufSize;
}
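// Read a value of the given size from Src, honoring the target's endianness.
// Example: the bytes {0x01, 0x02} read with Size == 2 yield 0x0201 on a
// little-endian target and 0x0102 on a big-endian one.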
uint64_t RuntimeDyldImpl::readBytesUnaligned(uint8_t *Src,
unsigned Size) const {
uint64_t Result = 0;
if (IsTargetLittleEndian) {
Src += Size - 1;
while (Size--)
Result = (Result << 8) | *Src--;
} else
while (Size--)
Result = (Result << 8) | *Src++;
return Result;
}
void RuntimeDyldImpl::writeBytesUnaligned(uint64_t Value, uint8_t *Dst,
unsigned Size) const {
if (IsTargetLittleEndian) {
while (Size--) {
*Dst++ = Value & 0xFF;
Value >>= 8;
}
} else {
Dst += Size - 1;
while (Size--) {
*Dst-- = Value & 0xFF;
Value >>= 8;
}
}
}
void RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
CommonSymbolList &CommonSymbols) {
if (CommonSymbols.empty())
return;
uint64_t CommonSize = 0;
CommonSymbolList SymbolsToAllocate;
DEBUG(dbgs() << "Processing common symbols...\n");
for (const auto &Sym : CommonSymbols) {
ErrorOr<StringRef> NameOrErr = Sym.getName();
Check(NameOrErr.getError());
StringRef Name = *NameOrErr;
// Skip common symbols already elsewhere.
if (GlobalSymbolTable.count(Name) ||
Resolver.findSymbolInLogicalDylib(Name)) {
DEBUG(dbgs() << "\tSkipping already emitted common symbol '" << Name
<< "'\n");
continue;
}
uint32_t Align = Sym.getAlignment();
uint64_t Size = Sym.getCommonSize();
CommonSize += Align + Size;
SymbolsToAllocate.push_back(Sym);
}
// Allocate memory for the section
unsigned SectionID = Sections.size();
uint8_t *Addr = MemMgr.allocateDataSection(CommonSize, sizeof(void *),
SectionID, StringRef(), false);
if (!Addr)
report_fatal_error("Unable to allocate memory for common symbols!");
uint64_t Offset = 0;
Sections.push_back(SectionEntry("<common symbols>", Addr, CommonSize, 0));
memset(Addr, 0, CommonSize);
DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID << " new addr: "
<< format("%p", Addr) << " DataSize: " << CommonSize << "\n");
// Assign the address of each symbol
for (auto &Sym : SymbolsToAllocate) {
uint32_t Align = Sym.getAlignment();
uint64_t Size = Sym.getCommonSize();
ErrorOr<StringRef> NameOrErr = Sym.getName();
Check(NameOrErr.getError());
StringRef Name = *NameOrErr;
if (Align) {
// This symbol has an alignment requirement.
uint64_t AlignOffset = OffsetToAlignment((uint64_t)Addr, Align);
Addr += AlignOffset;
Offset += AlignOffset;
}
uint32_t Flags = Sym.getFlags();
JITSymbolFlags RTDyldSymFlags = JITSymbolFlags::None;
if (Flags & SymbolRef::SF_Weak)
RTDyldSymFlags |= JITSymbolFlags::Weak;
if (Flags & SymbolRef::SF_Exported)
RTDyldSymFlags |= JITSymbolFlags::Exported;
DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
<< format("%p", Addr) << "\n");
GlobalSymbolTable[Name] =
SymbolTableEntry(SectionID, Offset, RTDyldSymFlags);
Offset += Size;
Addr += Size;
}
}
unsigned RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
const SectionRef &Section, bool IsCode) {
StringRef data;
uint64_t Alignment64 = Section.getAlignment();
unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
unsigned PaddingSize = 0;
unsigned StubBufSize = 0;
StringRef Name;
bool IsRequired = isRequiredForExecution(Section);
bool IsVirtual = Section.isVirtual();
bool IsZeroInit = isZeroInit(Section);
bool IsReadOnly = isReadOnlyData(Section);
uint64_t DataSize = Section.getSize();
Check(Section.getName(Name));
StubBufSize = computeSectionStubBufSize(Obj, Section);
  // The .eh_frame section (at least on Linux) needs an extra four bytes of
  // zero padding at the end. For MachO objects, this section has a slightly
  // different name, so this won't have any effect for MachO objects.
if (Name == ".eh_frame")
PaddingSize = 4;
uintptr_t Allocate;
unsigned SectionID = Sections.size();
uint8_t *Addr;
const char *pData = nullptr;
// In either case, set the location of the unrelocated section in memory,
// since we still process relocations for it even if we're not applying them.
Check(Section.getContents(data));
// Virtual sections have no data in the object image, so leave pData = 0
if (!IsVirtual)
pData = data.data();
// Some sections, such as debug info, don't need to be loaded for execution.
// Leave those where they are.
if (IsRequired) {
Allocate = DataSize + PaddingSize + StubBufSize;
if (!Allocate)
Allocate = 1;
Addr = IsCode ? MemMgr.allocateCodeSection(Allocate, Alignment, SectionID,
Name)
: MemMgr.allocateDataSection(Allocate, Alignment, SectionID,
Name, IsReadOnly);
if (!Addr)
report_fatal_error("Unable to allocate section memory!");
// Zero-initialize or copy the data from the image
if (IsZeroInit || IsVirtual)
memset(Addr, 0, DataSize);
else
memcpy(Addr, pData, DataSize);
// Fill in any extra bytes we allocated for padding
if (PaddingSize != 0) {
memset(Addr + DataSize, 0, PaddingSize);
// Update the DataSize variable so that the stub offset is set correctly.
DataSize += PaddingSize;
}
DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
<< " obj addr: " << format("%p", pData)
<< " new addr: " << format("%p", Addr)
<< " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
<< " Allocate: " << Allocate << "\n");
} else {
// Even if we didn't load the section, we need to record an entry for it
// to handle later processing (and by 'handle' I mean don't do anything
// with these sections).
Allocate = 0;
Addr = nullptr;
DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
<< " obj addr: " << format("%p", data.data()) << " new addr: 0"
<< " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
<< " Allocate: " << Allocate << "\n");
}
Sections.push_back(SectionEntry(Name, Addr, DataSize, (uintptr_t)pData));
if (Checker)
Checker->registerSection(Obj.getFileName(), SectionID);
return SectionID;
}
unsigned RuntimeDyldImpl::findOrEmitSection(const ObjectFile &Obj,
const SectionRef &Section,
bool IsCode,
ObjSectionToIDMap &LocalSections) {
unsigned SectionID = 0;
ObjSectionToIDMap::iterator i = LocalSections.find(Section);
if (i != LocalSections.end())
SectionID = i->second;
else {
SectionID = emitSection(Obj, Section, IsCode);
LocalSections[Section] = SectionID;
}
return SectionID;
}
void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
unsigned SectionID) {
Relocations[SectionID].push_back(RE);
}
void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
StringRef SymbolName) {
// Relocation by symbol. If the symbol is found in the global symbol table,
// create an appropriate section relocation. Otherwise, add it to
// ExternalSymbolRelocations.
RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
if (Loc == GlobalSymbolTable.end()) {
ExternalSymbolRelocations[SymbolName].push_back(RE);
} else {
// Copy the RE since we want to modify its addend.
RelocationEntry RECopy = RE;
const auto &SymInfo = Loc->second;
RECopy.Addend += SymInfo.getOffset();
Relocations[SymInfo.getSectionID()].push_back(RECopy);
}
}
uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
unsigned AbiVariant) {
if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be) {
// This stub has to be able to access the full address space,
// since symbol lookup won't necessarily find a handy, in-range,
// PLT stub for functions which could be anywhere.
// Stub can use ip0 (== x16) to calculate address
writeBytesUnaligned(0xd2e00010, Addr, 4); // movz ip0, #:abs_g3:<addr>
writeBytesUnaligned(0xf2c00010, Addr+4, 4); // movk ip0, #:abs_g2_nc:<addr>
writeBytesUnaligned(0xf2a00010, Addr+8, 4); // movk ip0, #:abs_g1_nc:<addr>
writeBytesUnaligned(0xf2800010, Addr+12, 4); // movk ip0, #:abs_g0_nc:<addr>
writeBytesUnaligned(0xd61f0200, Addr+16, 4); // br ip0
return Addr;
} else if (Arch == Triple::arm || Arch == Triple::armeb) {
// TODO: There is only ARM far stub now. We should add the Thumb stub,
// and stubs for branches Thumb - ARM and ARM - Thumb.
writeBytesUnaligned(0xe51ff004, Addr, 4); // ldr pc,<label>
return Addr + 4;
} else if (IsMipsO32ABI) {
// 0: 3c190000 lui t9,%hi(addr).
// 4: 27390000 addiu t9,t9,%lo(addr).
// 8: 03200008 jr t9.
// c: 00000000 nop.
const unsigned LuiT9Instr = 0x3c190000, AdduiT9Instr = 0x27390000;
const unsigned JrT9Instr = 0x03200008, NopInstr = 0x0;
writeBytesUnaligned(LuiT9Instr, Addr, 4);
writeBytesUnaligned(AdduiT9Instr, Addr+4, 4);
writeBytesUnaligned(JrT9Instr, Addr+8, 4);
writeBytesUnaligned(NopInstr, Addr+12, 4);
return Addr;
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
// Depending on which version of the ELF ABI is in use, we need to
// generate one of two variants of the stub. They both start with
// the same sequence to load the target address into r12.
writeInt32BE(Addr, 0x3D800000); // lis r12, highest(addr)
    writeInt32BE(Addr+4, 0x618C0000); // ori r12, r12, higher(addr)
writeInt32BE(Addr+8, 0x798C07C6); // sldi r12, r12, 32
writeInt32BE(Addr+12, 0x658C0000); // oris r12, r12, h(addr)
writeInt32BE(Addr+16, 0x618C0000); // ori r12, r12, l(addr)
if (AbiVariant == 2) {
// PowerPC64 stub ELFv2 ABI: The address points to the function itself.
// The address is already in r12 as required by the ABI. Branch to it.
writeInt32BE(Addr+20, 0xF8410018); // std r2, 24(r1)
writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
writeInt32BE(Addr+28, 0x4E800420); // bctr
} else {
// PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
      // Load the function address into r11 and move it to the count register.
      // Also load the function's TOC into r2 and its environment pointer into
      // r11.
writeInt32BE(Addr+20, 0xF8410028); // std r2, 40(r1)
writeInt32BE(Addr+24, 0xE96C0000); // ld r11, 0(r12)
      writeInt32BE(Addr+28, 0xE84C0008); // ld r2, 8(r12)
writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
      writeInt32BE(Addr+36, 0xE96C0010); // ld r11, 16(r12)
writeInt32BE(Addr+40, 0x4E800420); // bctr
}
return Addr;
} else if (Arch == Triple::systemz) {
writeInt16BE(Addr, 0xC418); // lgrl %r1,.+8
writeInt16BE(Addr+2, 0x0000);
writeInt16BE(Addr+4, 0x0004);
writeInt16BE(Addr+6, 0x07F1); // brc 15,%r1
// 8-byte address stored at Addr + 8
return Addr;
} else if (Arch == Triple::x86_64) {
*Addr = 0xFF; // jmp
*(Addr+1) = 0x25; // rip
// 32-bit PC-relative address of the GOT entry will be stored at Addr+2
} else if (Arch == Triple::x86) {
*Addr = 0xE9; // 32-bit pc-relative jump.
}
return Addr;
}
// Assign an address to a symbol name and resolve all the relocations
// associated with it.
void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
uint64_t Addr) {
// The address to use for relocation resolution is not
// the address of the local section buffer. We must be doing
// a remote execution environment of some sort. Relocations can't
// be applied until all the sections have been moved. The client must
// trigger this with a call to MCJIT::finalize() or
// RuntimeDyld::resolveRelocations().
//
// Addr is a uint64_t because we can't assume the pointer width
// of the target is the same as that of the host. Just use a generic
// "big enough" type.
DEBUG(dbgs() << "Reassigning address for section "
<< SectionID << " (" << Sections[SectionID].Name << "): "
<< format("0x%016" PRIx64, Sections[SectionID].LoadAddress) << " -> "
<< format("0x%016" PRIx64, Addr) << "\n");
Sections[SectionID].LoadAddress = Addr;
}
void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
uint64_t Value) {
for (unsigned i = 0, e = Relocs.size(); i != e; ++i) {
const RelocationEntry &RE = Relocs[i];
// Ignore relocations for sections that were not loaded
if (Sections[RE.SectionID].Address == nullptr)
continue;
resolveRelocation(RE, Value);
}
}
void RuntimeDyldImpl::resolveExternalSymbols() {
while (!ExternalSymbolRelocations.empty()) {
StringMap<RelocationList>::iterator i = ExternalSymbolRelocations.begin();
StringRef Name = i->first();
if (Name.size() == 0) {
// This is an absolute symbol, use an address of zero.
DEBUG(dbgs() << "Resolving absolute relocations."
<< "\n");
RelocationList &Relocs = i->second;
resolveRelocationList(Relocs, 0);
} else {
uint64_t Addr = 0;
RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(Name);
if (Loc == GlobalSymbolTable.end()) {
// This is an external symbol, try to get its address from the symbol
// resolver.
Addr = Resolver.findSymbol(Name.data()).getAddress();
// The call to getSymbolAddress may have caused additional modules to
// be loaded, which may have added new entries to the
// ExternalSymbolRelocations map. Consequently, we need to update our
// iterator. This is also why retrieval of the relocation list
// associated with this symbol is deferred until below this point.
// New entries may have been added to the relocation list.
i = ExternalSymbolRelocations.find(Name);
} else {
// We found the symbol in our global table. It was probably in a
// Module that we loaded previously.
const auto &SymInfo = Loc->second;
Addr = getSectionLoadAddress(SymInfo.getSectionID()) +
SymInfo.getOffset();
}
// FIXME: Implement error handling that doesn't kill the host program!
if (!Addr)
report_fatal_error("Program used external function '" + Name +
"' which could not be resolved!");
// If Resolver returned UINT64_MAX, the client wants to handle this symbol
// manually and we shouldn't resolve its relocations.
if (Addr != UINT64_MAX) {
DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
<< format("0x%lx", Addr) << "\n");
// This list may have been updated when we called getSymbolAddress, so
// don't change this code to get the list earlier.
RelocationList &Relocs = i->second;
resolveRelocationList(Relocs, Addr);
}
}
ExternalSymbolRelocations.erase(i);
}
}
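// Illustrative sketch of the UINT64_MAX convention honored above (names
// here are hypothetical):
//
//   struct DeferringResolver : RuntimeDyld::SymbolResolver {
//     RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) override {
//       if (Name == "lazy_fn") // claim it; resolve its relocations manually
//         return RuntimeDyld::SymbolInfo(UINT64_MAX, JITSymbolFlags::None);
//       return RuntimeDyld::SymbolInfo(nullptr);
//     }
//     RuntimeDyld::SymbolInfo
//     findSymbolInLogicalDylib(const std::string &Name) override {
//       return RuntimeDyld::SymbolInfo(nullptr);
//     }
//   };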
//===----------------------------------------------------------------------===//
// RuntimeDyld class implementation
uint64_t RuntimeDyld::LoadedObjectInfo::getSectionLoadAddress(
StringRef SectionName) const {
for (unsigned I = BeginIdx; I != EndIdx; ++I)
if (RTDyld.Sections[I].Name == SectionName)
return RTDyld.Sections[I].LoadAddress;
return 0;
}
void RuntimeDyld::MemoryManager::anchor() {}
void RuntimeDyld::SymbolResolver::anchor() {}
RuntimeDyld::RuntimeDyld(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: MemMgr(MemMgr), Resolver(Resolver) {
// FIXME: There's a potential issue lurking here if a single instance of
// RuntimeDyld is used to load multiple objects. The current implementation
// associates a single memory manager with a RuntimeDyld instance. Even
// though the public class spawns a new 'impl' instance for each load,
// they share a single memory manager. This can become a problem when page
// permissions are applied.
Dyld = nullptr;
ProcessAllSections = false;
Checker = nullptr;
}
RuntimeDyld::~RuntimeDyld() {}
static std::unique_ptr<RuntimeDyldCOFF>
createRuntimeDyldCOFF(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver,
bool ProcessAllSections, RuntimeDyldCheckerImpl *Checker) {
std::unique_ptr<RuntimeDyldCOFF> Dyld =
RuntimeDyldCOFF::create(Arch, MM, Resolver);
Dyld->setProcessAllSections(ProcessAllSections);
Dyld->setRuntimeDyldChecker(Checker);
return Dyld;
}
static std::unique_ptr<RuntimeDyldELF>
createRuntimeDyldELF(RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver,
bool ProcessAllSections, RuntimeDyldCheckerImpl *Checker) {
std::unique_ptr<RuntimeDyldELF> Dyld(new RuntimeDyldELF(MM, Resolver));
Dyld->setProcessAllSections(ProcessAllSections);
Dyld->setRuntimeDyldChecker(Checker);
return Dyld;
}
static std::unique_ptr<RuntimeDyldMachO>
createRuntimeDyldMachO(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver,
bool ProcessAllSections,
RuntimeDyldCheckerImpl *Checker) {
std::unique_ptr<RuntimeDyldMachO> Dyld =
RuntimeDyldMachO::create(Arch, MM, Resolver);
Dyld->setProcessAllSections(ProcessAllSections);
Dyld->setRuntimeDyldChecker(Checker);
return Dyld;
}
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyld::loadObject(const ObjectFile &Obj) {
if (!Dyld) {
if (Obj.isELF())
Dyld = createRuntimeDyldELF(MemMgr, Resolver, ProcessAllSections, Checker);
else if (Obj.isMachO())
Dyld = createRuntimeDyldMachO(
static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
ProcessAllSections, Checker);
else if (Obj.isCOFF())
Dyld = createRuntimeDyldCOFF(
static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
ProcessAllSections, Checker);
else
report_fatal_error("Incompatible object format!");
}
if (!Dyld->isCompatibleFile(Obj))
report_fatal_error("Incompatible object format!");
return Dyld->loadObject(Obj);
}
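// Illustrative usage (a sketch; MyMemMgr and MyResolver stand for client
// classes implementing the two abstract interfaces):
//
//   MyMemMgr MM;
//   MyResolver RR;
//   RuntimeDyld Dyld(MM, RR);
//   auto Info = Dyld.loadObject(Obj); // picks the ELF/MachO/COFF backend
//   Dyld.resolveRelocations();
//   Dyld.registerEHFrames();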
void *RuntimeDyld::getSymbolLocalAddress(StringRef Name) const {
if (!Dyld)
return nullptr;
return Dyld->getSymbolLocalAddress(Name);
}
RuntimeDyld::SymbolInfo RuntimeDyld::getSymbol(StringRef Name) const {
if (!Dyld)
return nullptr;
return Dyld->getSymbol(Name);
}
void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
Dyld->reassignSectionAddress(SectionID, Addr);
}
void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
uint64_t TargetAddress) {
Dyld->mapSectionAddress(LocalAddress, TargetAddress);
}
bool RuntimeDyld::hasError() { return Dyld->hasError(); }
StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
void RuntimeDyld::registerEHFrames() {
if (Dyld)
Dyld->registerEHFrames();
}
void RuntimeDyld::deregisterEHFrames() {
if (Dyld)
Dyld->deregisterEHFrames();
}
} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt | add_llvm_library(LLVMRuntimeDyld
RTDyldMemoryManager.cpp
RuntimeDyld.cpp
RuntimeDyldChecker.cpp
RuntimeDyldCOFF.cpp
RuntimeDyldELF.cpp
RuntimeDyldMachO.cpp
DEPENDS
intrinsics_gen
)
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/LLVMBuild.txt | ;===- ./lib/ExecutionEngine/RuntimeDyld/LLVMBuild.txt ----------*- Conf -*--===;
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
[component_0]
type = Library
name = RuntimeDyld
parent = ExecutionEngine
required_libraries = MC Object Support
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp | //===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#include "RuntimeDyldMachO.h"
#include "Targets/RuntimeDyldMachOAArch64.h"
#include "Targets/RuntimeDyldMachOARM.h"
#include "Targets/RuntimeDyldMachOI386.h"
#include "Targets/RuntimeDyldMachOX86_64.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
using namespace llvm;
using namespace llvm::object;
#define DEBUG_TYPE "dyld"
namespace {
class LoadedMachOObjectInfo
: public RuntimeDyld::LoadedObjectInfoHelper<LoadedMachOObjectInfo> {
public:
LoadedMachOObjectInfo(RuntimeDyldImpl &RTDyld, unsigned BeginIdx,
unsigned EndIdx)
: LoadedObjectInfoHelper(RTDyld, BeginIdx, EndIdx) {}
OwningBinary<ObjectFile>
getObjectForDebug(const ObjectFile &Obj) const override {
return OwningBinary<ObjectFile>();
}
};
}
namespace llvm {
int64_t RuntimeDyldMachO::memcpyAddend(const RelocationEntry &RE) const {
unsigned NumBytes = 1 << RE.Size;
uint8_t *Src = Sections[RE.SectionID].Address + RE.Offset;
return static_cast<int64_t>(readBytesUnaligned(Src, NumBytes));
}
RelocationValueRef RuntimeDyldMachO::getRelocationValueRef(
const ObjectFile &BaseTObj, const relocation_iterator &RI,
const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID) {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile &>(BaseTObj);
MachO::any_relocation_info RelInfo =
Obj.getRelocation(RI->getRawDataRefImpl());
RelocationValueRef Value;
bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
if (IsExternal) {
symbol_iterator Symbol = RI->getSymbol();
ErrorOr<StringRef> TargetNameOrErr = Symbol->getName();
if (std::error_code EC = TargetNameOrErr.getError())
report_fatal_error(EC.message());
StringRef TargetName = *TargetNameOrErr;
RTDyldSymbolTable::const_iterator SI =
GlobalSymbolTable.find(TargetName.data());
if (SI != GlobalSymbolTable.end()) {
const auto &SymInfo = SI->second;
Value.SectionID = SymInfo.getSectionID();
Value.Offset = SymInfo.getOffset() + RE.Addend;
} else {
Value.SymbolName = TargetName.data();
Value.Offset = RE.Addend;
}
} else {
SectionRef Sec = Obj.getAnyRelocationSection(RelInfo);
bool IsCode = Sec.isText();
Value.SectionID = findOrEmitSection(Obj, Sec, IsCode, ObjSectionToID);
uint64_t Addr = Sec.getAddress();
Value.Offset = RE.Addend - Addr;
}
return Value;
}
void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
const relocation_iterator &RI,
unsigned OffsetToNextPC) {
auto &O = *cast<MachOObjectFile>(RI->getObject());
section_iterator SecI = O.getRelocationRelocatedSection(RI);
Value.Offset += RI->getOffset() + OffsetToNextPC + SecI->getAddress();
}
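// Worked example (illustrative): if the relocation sits at offset 0x10 of a
// section whose object-file address is 0x1000, and OffsetToNextPC is 4, the
// addition above contributes 0x1000 + 0x10 + 4 = 0x1014, converting the
// PC-relative addend into a value relative to the section layout.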
void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
uint64_t Value) const {
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
dbgs() << "resolveRelocation Section: " << RE.SectionID
<< " LocalAddress: " << format("%p", LocalAddress)
<< " FinalAddress: " << format("0x%016" PRIx64, FinalAddress)
<< " Value: " << format("0x%016" PRIx64, Value) << " Addend: " << RE.Addend
<< " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
<< " Size: " << (1 << RE.Size) << "\n";
}
section_iterator
RuntimeDyldMachO::getSectionByAddress(const MachOObjectFile &Obj,
uint64_t Addr) {
section_iterator SI = Obj.section_begin();
section_iterator SE = Obj.section_end();
for (; SI != SE; ++SI) {
uint64_t SAddr = SI->getAddress();
uint64_t SSize = SI->getSize();
if ((Addr >= SAddr) && (Addr < SAddr + SSize))
return SI;
}
return SE;
}
// Populate __pointers section.
void RuntimeDyldMachO::populateIndirectSymbolPointersSection(
const MachOObjectFile &Obj,
const SectionRef &PTSection,
unsigned PTSectionID) {
assert(!Obj.is64Bit() &&
"Pointer table section not supported in 64-bit MachO.");
MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
uint32_t PTSectionSize = Sec32.size;
unsigned FirstIndirectSymbol = Sec32.reserved1;
const unsigned PTEntrySize = 4;
unsigned NumPTEntries = PTSectionSize / PTEntrySize;
unsigned PTEntryOffset = 0;
assert((PTSectionSize % PTEntrySize) == 0 &&
"Pointers section does not contain a whole number of stubs?");
DEBUG(dbgs() << "Populating pointer table section "
<< Sections[PTSectionID].Name
<< ", Section ID " << PTSectionID << ", "
<< NumPTEntries << " entries, " << PTEntrySize
<< " bytes each:\n");
for (unsigned i = 0; i < NumPTEntries; ++i) {
unsigned SymbolIndex =
Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
ErrorOr<StringRef> IndirectSymbolNameOrErr = SI->getName();
if (std::error_code EC = IndirectSymbolNameOrErr.getError())
report_fatal_error(EC.message());
StringRef IndirectSymbolName = *IndirectSymbolNameOrErr;
DEBUG(dbgs() << " " << IndirectSymbolName << ": index " << SymbolIndex
<< ", PT offset: " << PTEntryOffset << "\n");
RelocationEntry RE(PTSectionID, PTEntryOffset,
MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
addRelocationForSymbol(RE, IndirectSymbolName);
PTEntryOffset += PTEntrySize;
}
}
bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile &Obj) const {
return Obj.isMachO();
}
template <typename Impl>
void RuntimeDyldMachOCRTPBase<Impl>::finalizeLoad(const ObjectFile &Obj,
ObjSectionToIDMap &SectionMap) {
unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
for (const auto &Section : Obj.sections()) {
StringRef Name;
Section.getName(Name);
// Force emission of the __text, __eh_frame, and __gcc_except_tab sections
// if they're present. Otherwise call down to the impl to handle other
// sections that have already been emitted.
if (Name == "__text")
TextSID = findOrEmitSection(Obj, Section, true, SectionMap);
else if (Name == "__eh_frame")
EHFrameSID = findOrEmitSection(Obj, Section, false, SectionMap);
else if (Name == "__gcc_except_tab")
ExceptTabSID = findOrEmitSection(Obj, Section, true, SectionMap);
else {
auto I = SectionMap.find(Section);
if (I != SectionMap.end())
impl().finalizeSection(Obj, I->second, Section);
}
}
UnregisteredEHFrameSections.push_back(
EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
}
template <typename Impl>
unsigned char *RuntimeDyldMachOCRTPBase<Impl>::processFDE(unsigned char *P,
int64_t DeltaForText,
int64_t DeltaForEH) {
typedef typename Impl::TargetPtrT TargetPtrT;
DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
<< ", Delta for EH: " << DeltaForEH << "\n");
uint32_t Length = readBytesUnaligned(P, 4);
P += 4;
unsigned char *Ret = P + Length;
uint32_t Offset = readBytesUnaligned(P, 4);
if (Offset == 0) // is a CIE
return Ret;
P += 4;
TargetPtrT FDELocation = readBytesUnaligned(P, sizeof(TargetPtrT));
TargetPtrT NewLocation = FDELocation - DeltaForText;
writeBytesUnaligned(NewLocation, P, sizeof(TargetPtrT));
P += sizeof(TargetPtrT);
// Skip the FDE address range
P += sizeof(TargetPtrT);
uint8_t Augmentationsize = *P;
P += 1;
if (Augmentationsize != 0) {
TargetPtrT LSDA = readBytesUnaligned(P, sizeof(TargetPtrT));
TargetPtrT NewLSDA = LSDA - DeltaForEH;
writeBytesUnaligned(NewLSDA, P, sizeof(TargetPtrT));
}
return Ret;
}
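// For reference, the (simplified) record layout walked above, assuming
// 32-bit length fields (field names here are descriptive only):
//
//   uint32_t Length;          // size of the rest of the record
//   uint32_t CIEPointer;      // 0 => this record is a CIE and is skipped
//   TargetPtrT PCBegin;       // rebased by DeltaForText above
//   TargetPtrT PCRange;       // skipped
//   uint8_t AugmentationSize; // if non-zero, an LSDA pointer follows and
//   TargetPtrT LSDA;          // is rebased by DeltaForEH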
static int64_t computeDelta(SectionEntry *A, SectionEntry *B) {
int64_t ObjDistance =
static_cast<int64_t>(A->ObjAddress) - static_cast<int64_t>(B->ObjAddress);
int64_t MemDistance = A->LoadAddress - B->LoadAddress;
return ObjDistance - MemDistance;
}
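// Worked example (illustrative): if __text sat at object address 0x100 and
// was loaded at 0x5100, while __eh_frame sat at 0x200 and was loaded at
// 0x7200, then ObjDistance = 0x100 - 0x200 = -0x100, MemDistance =
// 0x5100 - 0x7200 = -0x2100, and the delta is -0x100 - (-0x2100) = 0x2000.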
template <typename Impl>
void RuntimeDyldMachOCRTPBase<Impl>::registerEHFrames() {
for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
EHFrameRelatedSections &SectionInfo = UnregisteredEHFrameSections[i];
if (SectionInfo.EHFrameSID == RTDYLD_INVALID_SECTION_ID ||
SectionInfo.TextSID == RTDYLD_INVALID_SECTION_ID)
continue;
SectionEntry *Text = &Sections[SectionInfo.TextSID];
SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
SectionEntry *ExceptTab = nullptr;
if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
ExceptTab = &Sections[SectionInfo.ExceptTabSID];
int64_t DeltaForText = computeDelta(Text, EHFrame);
int64_t DeltaForEH = 0;
if (ExceptTab)
DeltaForEH = computeDelta(ExceptTab, EHFrame);
unsigned char *P = EHFrame->Address;
unsigned char *End = P + EHFrame->Size;
do {
P = processFDE(P, DeltaForText, DeltaForEH);
} while (P != End);
MemMgr.registerEHFrames(EHFrame->Address, EHFrame->LoadAddress,
EHFrame->Size);
}
UnregisteredEHFrameSections.clear();
}
std::unique_ptr<RuntimeDyldMachO>
RuntimeDyldMachO::create(Triple::ArchType Arch,
RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver) {
switch (Arch) {
default:
llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
break;
case Triple::arm:
return make_unique<RuntimeDyldMachOARM>(MemMgr, Resolver);
case Triple::aarch64:
return make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
case Triple::x86:
return make_unique<RuntimeDyldMachOI386>(MemMgr, Resolver);
case Triple::x86_64:
return make_unique<RuntimeDyldMachOX86_64>(MemMgr, Resolver);
}
}
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyldMachO::loadObject(const object::ObjectFile &O) {
unsigned SectionStartIdx, SectionEndIdx;
std::tie(SectionStartIdx, SectionEndIdx) = loadObjectImpl(O);
return llvm::make_unique<LoadedMachOObjectInfo>(*this, SectionStartIdx,
SectionEndIdx);
}
} // end namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp | //===-- RTDyldMemoryManager.cpp - Memory manager for MC-JIT -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of the runtime dynamic memory manager base class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/ErrorHandling.h"
#include <cstdlib>
#ifdef __linux__
// These includes used by RTDyldMemoryManager::getPointerToNamedFunction()
// for Glibc trickery. See comments in this function for more information.
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#include <fcntl.h>
#include <unistd.h>
#endif
namespace llvm {
RTDyldMemoryManager::~RTDyldMemoryManager() {}
// Determine whether we can register EH tables.
#if (defined(__GNUC__) && !defined(__ARM_EABI__) && !defined(__ia64__) && \
!defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__))
#define HAVE_EHTABLE_SUPPORT 1
#else
#define HAVE_EHTABLE_SUPPORT 0
#endif
#if HAVE_EHTABLE_SUPPORT
extern "C" void __register_frame(void *);
extern "C" void __deregister_frame(void *);
#else
// The building compiler does not have __(de)register_frame but
// it may be found at runtime in a dynamically-loaded library.
// For example, this happens when building LLVM with Visual C++
// but using the MingW runtime.
void __register_frame(void *p) {
static bool Searched = false;
static void((*rf)(void *)) = 0;
if (!Searched) {
Searched = true;
*(void **)&rf =
llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
}
if (rf)
rf(p);
}
void __deregister_frame(void *p) {
static bool Searched = false;
static void((*df)(void *)) = 0;
if (!Searched) {
Searched = true;
*(void **)&df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
"__deregister_frame");
}
if (df)
df(p);
}
#endif
#ifdef __APPLE__
static const char *processFDE(const char *Entry, bool isDeregister) {
const char *P = Entry;
uint32_t Length = *((const uint32_t *)P);
P += 4;
uint32_t Offset = *((const uint32_t *)P);
if (Offset != 0) {
if (isDeregister)
__deregister_frame(const_cast<char *>(Entry));
else
__register_frame(const_cast<char *>(Entry));
}
return P + Length;
}
// This implementation handles frame registration for local targets.
// Memory managers for remote targets should re-implement this function
// and use the LoadAddr parameter.
void RTDyldMemoryManager::registerEHFrames(uint8_t *Addr,
uint64_t LoadAddr,
size_t Size) {
// On OS X, __register_frame takes a single FDE as an argument.
// See http://lists.llvm.org/pipermail/llvm-dev/2013-April/061768.html
const char *P = (const char *)Addr;
const char *End = P + Size;
do {
P = processFDE(P, false);
} while(P != End);
}
void RTDyldMemoryManager::deregisterEHFrames(uint8_t *Addr,
uint64_t LoadAddr,
size_t Size) {
const char *P = (const char *)Addr;
const char *End = P + Size;
do {
P = processFDE(P, true);
} while(P != End);
}
#else
void RTDyldMemoryManager::registerEHFrames(uint8_t *Addr,
uint64_t LoadAddr,
size_t Size) {
// On Linux __register_frame takes a single argument:
// a pointer to the start of the .eh_frame section.
// How can it find the end? Because crtendS.o is linked
// in, and it has an .eh_frame section terminated by four zero bytes.
__register_frame(Addr);
}
void RTDyldMemoryManager::deregisterEHFrames(uint8_t *Addr,
uint64_t LoadAddr,
size_t Size) {
__deregister_frame(Addr);
}
#endif
static int jit_noop() {
return 0;
}
// ARM math functions are statically linked on Android from libgcc.a, but not
// available at runtime for dynamic linking. On Linux these are usually placed
// in libgcc_s.so, so they can be found by normal dynamic lookup.
#if defined(__BIONIC__) && defined(__arm__)
// List of functions which are statically linked on Android and can be generated
// by LLVM. This is done as a nested macro which is used once to declare the
// imported functions with ARM_MATH_DECL and once to compare them to the
// user-requested symbol in getSymbolAddress with ARM_MATH_CHECK. The test
// assumes that all functions start with __aeabi_ and getSymbolAddress must be
// modified if that changes.
#define ARM_MATH_IMPORTS(PP) \
PP(__aeabi_d2f) \
PP(__aeabi_d2iz) \
PP(__aeabi_d2lz) \
PP(__aeabi_d2uiz) \
PP(__aeabi_d2ulz) \
PP(__aeabi_dadd) \
PP(__aeabi_dcmpeq) \
PP(__aeabi_dcmpge) \
PP(__aeabi_dcmpgt) \
PP(__aeabi_dcmple) \
PP(__aeabi_dcmplt) \
PP(__aeabi_dcmpun) \
PP(__aeabi_ddiv) \
PP(__aeabi_dmul) \
PP(__aeabi_dsub) \
PP(__aeabi_f2d) \
PP(__aeabi_f2iz) \
PP(__aeabi_f2lz) \
PP(__aeabi_f2uiz) \
PP(__aeabi_f2ulz) \
PP(__aeabi_fadd) \
PP(__aeabi_fcmpeq) \
PP(__aeabi_fcmpge) \
PP(__aeabi_fcmpgt) \
PP(__aeabi_fcmple) \
PP(__aeabi_fcmplt) \
PP(__aeabi_fcmpun) \
PP(__aeabi_fdiv) \
PP(__aeabi_fmul) \
PP(__aeabi_fsub) \
PP(__aeabi_i2d) \
PP(__aeabi_i2f) \
PP(__aeabi_idiv) \
PP(__aeabi_idivmod) \
PP(__aeabi_l2d) \
PP(__aeabi_l2f) \
PP(__aeabi_lasr) \
PP(__aeabi_ldivmod) \
PP(__aeabi_llsl) \
PP(__aeabi_llsr) \
PP(__aeabi_lmul) \
PP(__aeabi_ui2d) \
PP(__aeabi_ui2f) \
PP(__aeabi_uidiv) \
PP(__aeabi_uidivmod) \
PP(__aeabi_ul2d) \
PP(__aeabi_ul2f) \
PP(__aeabi_uldivmod)
// Declare statically linked math functions on ARM. The function declarations
// here do not have the correct prototypes for each function in
// ARM_MATH_IMPORTS, but it doesn't matter because only the symbol addresses are
// needed. In particular the __aeabi_*divmod functions do not have calling
// conventions which match any C prototype.
#define ARM_MATH_DECL(name) extern "C" void name();
ARM_MATH_IMPORTS(ARM_MATH_DECL)
#undef ARM_MATH_DECL
#endif
#if defined(__linux__) && defined(__GLIBC__) && \
(defined(__i386__) || defined(__x86_64__))
extern "C" LLVM_ATTRIBUTE_WEAK void __morestack();
#endif
uint64_t
RTDyldMemoryManager::getSymbolAddressInProcess(const std::string &Name) {
// This implementation assumes that the host program is the target.
// Clients generating code for a remote target should implement their own
// memory manager.
#if defined(__linux__) && defined(__GLIBC__)
//===--------------------------------------------------------------------===//
// Function stubs that are invoked instead of certain library calls
//
// Force the following functions to be linked in to anything that uses the
// JIT. This is a hack designed to work around the all-too-clever Glibc
// strategy of making these functions work differently when inlined vs. when
// not inlined, and hiding their real definitions in a separate archive file
// that the dynamic linker can't see. For more info, search for
// 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
if (Name == "stat") return (uint64_t)&stat;
if (Name == "fstat") return (uint64_t)&fstat;
if (Name == "lstat") return (uint64_t)&lstat;
if (Name == "stat64") return (uint64_t)&stat64;
if (Name == "fstat64") return (uint64_t)&fstat64;
if (Name == "lstat64") return (uint64_t)&lstat64;
if (Name == "atexit") return (uint64_t)&atexit;
if (Name == "mknod") return (uint64_t)&mknod;
#if defined(__i386__) || defined(__x86_64__)
// __morestack lives in libgcc, a static library.
if (&__morestack && Name == "__morestack")
return (uint64_t)&__morestack;
#endif
#endif // __linux__ && __GLIBC__
// See ARM_MATH_IMPORTS definition for explanation
#if defined(__BIONIC__) && defined(__arm__)
if (Name.compare(0, 8, "__aeabi_") == 0) {
// Check if the user has requested any of the functions listed in
// ARM_MATH_IMPORTS, and if so redirect to the statically linked symbol.
#define ARM_MATH_CHECK(fn) if (Name == #fn) return (uint64_t)&fn;
ARM_MATH_IMPORTS(ARM_MATH_CHECK)
#undef ARM_MATH_CHECK
}
#endif
// We should not invoke parent's ctors/dtors from generated main()!
// On MinGW and Cygwin, the symbol __main would otherwise resolve to the
// callee's (e.g. tools/lli) copy, invoking the wrong, duplicated ctors
// (and registering the wrong callee's dtors with atexit(3)).
// We expect ExecutionEngine::runStaticConstructorsDestructors()
// is called before ExecutionEngine::runFunctionAsMain() is called.
if (Name == "__main") return (uint64_t)&jit_noop;
// Try to demangle Name before looking it up in the process, otherwise symbol
// '_<Name>' (if present) will shadow '<Name>', and there will be no way to
// refer to the latter.
const char *NameStr = Name.c_str();
if (NameStr[0] == '_')
if (void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr + 1))
return (uint64_t)Ptr;
// If Name did not require demangling, or we failed to find the demangled
// name, try again without demangling.
return (uint64_t)sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
}
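// Illustrative usage (a sketch): an in-process lookup that benefits from
// the fallbacks above (glibc stat family, __main, leading-underscore
// retry):
//
//   uint64_t A = RTDyldMemoryManager::getSymbolAddressInProcess("atexit");
//   if (!A)
//     report_fatal_error("symbol not found in host process");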
void *RTDyldMemoryManager::getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure) {
uint64_t Addr = getSymbolAddress(Name);
if (!Addr && AbortOnFailure)
report_fatal_error("Program used external function '" + Name +
"' which could not be resolved!");
return (void*)Addr;
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h | //===-- RuntimeDyldELF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// ELF support for MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
#include "RuntimeDyldImpl.h"
#include "llvm/ADT/DenseMap.h"
using namespace llvm;
namespace llvm {
class RuntimeDyldELF : public RuntimeDyldImpl {
void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend,
uint64_t SymOffset = 0, SID SectionID = 0);
void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend,
uint64_t SymOffset);
void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
uint32_t Value, uint32_t Type, int32_t Addend);
void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend);
void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
uint32_t Value, uint32_t Type, int32_t Addend);
void resolveMIPSRelocation(const SectionEntry &Section, uint64_t Offset,
uint32_t Value, uint32_t Type, int32_t Addend);
void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend);
void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend);
void resolveMIPS64Relocation(const SectionEntry &Section, uint64_t Offset,
uint64_t Value, uint32_t Type, int64_t Addend,
uint64_t SymOffset, SID SectionID);
int64_t evaluateMIPS64Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend,
uint64_t SymOffset, SID SectionID);
void applyMIPS64Relocation(uint8_t *TargetPtr, int64_t CalculatedValue,
uint32_t Type);
unsigned getMaxStubSize() override {
if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
return 20; // movz; movk; movk; movk; br
if (Arch == Triple::arm || Arch == Triple::thumb)
return 8; // 32-bit instruction and 32-bit address
else if (IsMipsO32ABI)
return 16;
else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le)
return 44;
else if (Arch == Triple::x86_64)
return 6; // 2-byte jmp instruction + 32-bit relative address
else if (Arch == Triple::systemz)
return 16;
else
return 0;
}
unsigned getStubAlignment() override {
if (Arch == Triple::systemz)
return 8;
else
return 1;
}
void setMipsABI(const ObjectFile &Obj) override;
void findPPC64TOCSection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel);
void findOPDEntrySection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel);
size_t getGOTEntrySize();
SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
// Allocate the requested number ('no') of GOT entries for use in the
// given section.
uint64_t allocateGOTEntries(unsigned SectionID, unsigned no);
// Resolve the relative address of GOTOffset in the section given by
// SectionID and place it at the given Offset.
void resolveGOTOffsetRelocation(unsigned SectionID, uint64_t Offset,
uint64_t GOTOffset);
// For a GOT entry referenced from SectionID, compute a relocation entry
// that will place the final resolved value in the GOT slot
RelocationEntry computeGOTOffsetRE(unsigned SectionID,
uint64_t GOTOffset,
uint64_t SymbolOffset,
unsigned Type);
// Compute the address in memory where we can find the placeholder
void *computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const;
// Split out the common case of creating the RelocationEntry when the
// relocation requires no particular advanced processing.
void processSimpleRelocation(unsigned SectionID, uint64_t Offset,
unsigned RelType, RelocationValueRef Value);
// The tentative ID for the GOT section
unsigned GOTSectionID;
// Records the current number of allocated slots in the GOT
// (This would be equivalent to GOTEntries.size() were it not for relocations
// that consume more than one slot)
unsigned CurrentGOTIndex;
// A map from section to a GOT section that has entries for section's GOT
// relocations. (Mips64 specific)
DenseMap<SID, SID> SectionToGOTMap;
// A map to avoid duplicate got entries (Mips64 specific)
StringMap<uint64_t> GOTSymbolOffsets;
// When a module is loaded we save the SectionID of the EH frame section
// in a table until we receive a request to register all unregistered
// EH frame sections with the memory manager.
SmallVector<SID, 2> UnregisteredEHFrameSections;
SmallVector<SID, 2> RegisteredEHFrameSections;
public:
RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver);
~RuntimeDyldELF() override;
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
loadObject(const object::ObjectFile &O) override;
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
relocation_iterator
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &Obj,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) override;
bool isCompatibleFile(const object::ObjectFile &Obj) const override;
void registerEHFrames() override;
void deregisterEHFrames() override;
void finalizeLoad(const ObjectFile &Obj,
ObjSectionToIDMap &SectionMap) override;
};
} // end namespace llvm
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp | //===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of ELF support for the MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#include "RuntimeDyldELF.h"
#include "RuntimeDyldCheckerImpl.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
using namespace llvm::object;
#define DEBUG_TYPE "dyld"
static inline std::error_code check(std::error_code Err) {
if (Err) {
report_fatal_error(Err.message());
}
return Err;
}
namespace {
template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
typedef Elf_Sym_Impl<ELFT> Elf_Sym;
typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;
public:
DyldELFObject(MemoryBufferRef Wrapper, std::error_code &ec);
void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
// Methods for type inquiry through isa, cast and dyn_cast
static inline bool classof(const Binary *v) {
return (isa<ELFObjectFile<ELFT>>(v) &&
classof(cast<ELFObjectFile<ELFT>>(v)));
}
static inline bool classof(const ELFObjectFile<ELFT> *v) {
return v->isDyldType();
}
};
// The MemoryBuffer passed into this constructor is just a wrapper around the
// actual memory. Ultimately, the Binary parent class will take ownership of
// this MemoryBuffer object but not the underlying memory.
template <class ELFT>
DyldELFObject<ELFT>::DyldELFObject(MemoryBufferRef Wrapper, std::error_code &EC)
: ELFObjectFile<ELFT>(Wrapper, EC) {
this->isDyldELFObject = true;
}
template <class ELFT>
void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
uint64_t Addr) {
DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
Elf_Shdr *shdr =
const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
shdr->sh_addr = static_cast<addr_type>(Addr);
}
template <class ELFT>
void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
uint64_t Addr) {
Elf_Sym *sym = const_cast<Elf_Sym *>(
ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
sym->st_value = static_cast<addr_type>(Addr);
}
class LoadedELFObjectInfo
: public RuntimeDyld::LoadedObjectInfoHelper<LoadedELFObjectInfo> {
public:
LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, unsigned BeginIdx,
unsigned EndIdx)
: LoadedObjectInfoHelper(RTDyld, BeginIdx, EndIdx) {}
OwningBinary<ObjectFile>
getObjectForDebug(const ObjectFile &Obj) const override;
};
template <typename ELFT>
std::unique_ptr<DyldELFObject<ELFT>>
createRTDyldELFObject(MemoryBufferRef Buffer,
const LoadedELFObjectInfo &L,
std::error_code &ec) {
typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
typedef typename ELFDataTypeTypedefHelper<ELFT>::value_type addr_type;
std::unique_ptr<DyldELFObject<ELFT>> Obj =
llvm::make_unique<DyldELFObject<ELFT>>(Buffer, ec);
// Iterate over all sections in the object.
for (const auto &Sec : Obj->sections()) {
StringRef SectionName;
Sec.getName(SectionName);
if (SectionName != "") {
DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
if (uint64_t SecLoadAddr = L.getSectionLoadAddress(SectionName)) {
// This assumes that the address passed in matches the target address
// bitness. The template-based type cast handles everything else.
shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
}
}
}
return Obj;
}
OwningBinary<ObjectFile> createELFDebugObject(const ObjectFile &Obj,
const LoadedELFObjectInfo &L) {
assert(Obj.isELF() && "Not an ELF object file.");
std::unique_ptr<MemoryBuffer> Buffer =
MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
std::error_code ec;
std::unique_ptr<ObjectFile> DebugObj;
if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian()) {
typedef ELFType<support::little, false> ELF32LE;
DebugObj = createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), L, ec);
} else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian()) {
typedef ELFType<support::big, false> ELF32BE;
DebugObj = createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), L, ec);
} else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian()) {
typedef ELFType<support::big, true> ELF64BE;
DebugObj = createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), L, ec);
} else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian()) {
typedef ELFType<support::little, true> ELF64LE;
DebugObj = createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), L, ec);
} else
llvm_unreachable("Unexpected ELF format");
assert(!ec && "Could not construct copy ELF object file");
return OwningBinary<ObjectFile>(std::move(DebugObj), std::move(Buffer));
}
OwningBinary<ObjectFile>
LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
return createELFDebugObject(Obj, *this);
}
} // namespace
namespace llvm {
RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
RuntimeDyldELF::~RuntimeDyldELF() {}
void RuntimeDyldELF::registerEHFrames() {
for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
SID EHFrameSID = UnregisteredEHFrameSections[i];
uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
size_t EHFrameSize = Sections[EHFrameSID].Size;
MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
RegisteredEHFrameSections.push_back(EHFrameSID);
}
UnregisteredEHFrameSections.clear();
}
void RuntimeDyldELF::deregisterEHFrames() {
for (int i = 0, e = RegisteredEHFrameSections.size(); i != e; ++i) {
SID EHFrameSID = RegisteredEHFrameSections[i];
uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
size_t EHFrameSize = Sections[EHFrameSID].Size;
MemMgr.deregisterEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
}
RegisteredEHFrameSections.clear();
}
std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
unsigned SectionStartIdx, SectionEndIdx;
std::tie(SectionStartIdx, SectionEndIdx) = loadObjectImpl(O);
return llvm::make_unique<LoadedELFObjectInfo>(*this, SectionStartIdx,
SectionEndIdx);
}
void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend,
uint64_t SymOffset) {
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_X86_64_64: {
support::ulittle64_t::ref(Section.Address + Offset) = Value + Addend;
DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
<< format("%p\n", Section.Address + Offset));
break;
}
case ELF::R_X86_64_32:
case ELF::R_X86_64_32S: {
Value += Addend;
assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
(Type == ELF::R_X86_64_32S &&
((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
support::ulittle32_t::ref(Section.Address + Offset) = TruncatedAddr;
DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
<< format("%p\n", Section.Address + Offset));
break;
}
case ELF::R_X86_64_PC32: {
uint64_t FinalAddress = Section.LoadAddress + Offset;
int64_t RealOffset = Value + Addend - FinalAddress;
assert(isInt<32>(RealOffset));
int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
support::ulittle32_t::ref(Section.Address + Offset) = TruncOffset;
break;
}
case ELF::R_X86_64_PC64: {
uint64_t FinalAddress = Section.LoadAddress + Offset;
int64_t RealOffset = Value + Addend - FinalAddress;
support::ulittle64_t::ref(Section.Address + Offset) = RealOffset;
break;
}
}
}
void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
uint64_t Offset, uint32_t Value,
uint32_t Type, int32_t Addend) {
switch (Type) {
case ELF::R_386_32: {
support::ulittle32_t::ref(Section.Address + Offset) = Value + Addend;
break;
}
case ELF::R_386_PC32: {
uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
uint32_t RealOffset = Value + Addend - FinalAddress;
support::ulittle32_t::ref(Section.Address + Offset) = RealOffset;
break;
}
default:
// There are other relocation types, but it appears these are the
// only ones currently used by the LLVM ELF object writer
llvm_unreachable("Relocation type not implemented yet!");
break;
}
}
void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend) {
uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.Address + Offset);
uint64_t FinalAddress = Section.LoadAddress + Offset;
DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
<< format("%llx", Section.Address + Offset)
<< " FinalAddress: 0x" << format("%llx", FinalAddress)
<< " Value: 0x" << format("%llx", Value) << " Type: 0x"
<< format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
<< "\n");
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_AARCH64_ABS64: {
uint64_t *TargetPtr =
reinterpret_cast<uint64_t *>(Section.Address + Offset);
*TargetPtr = Value + Addend;
break;
}
case ELF::R_AARCH64_PREL32: {
uint64_t Result = Value + Addend - FinalAddress;
assert(static_cast<int64_t>(Result) >= INT32_MIN &&
static_cast<int64_t>(Result) <= UINT32_MAX);
*TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
break;
}
case ELF::R_AARCH64_CALL26: // fallthrough
case ELF::R_AARCH64_JUMP26: {
// Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
// calculation.
uint64_t BranchImm = Value + Addend - FinalAddress;
// "Check that -2^27 <= result < 2^27".
assert(isInt<28>(BranchImm));
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xfc000000U;
// Immediate goes in bits 25:0 of B and BL.
*TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
break;
}
case ELF::R_AARCH64_MOVW_UABS_G3: {
uint64_t Result = Value + Addend;
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= Result >> (48 - 5);
// Shift must be "lsl #48", in bits 22:21
assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
break;
}
case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
uint64_t Result = Value + Addend;
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
// Shift must be "lsl #32", in bits 22:21
assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
break;
}
case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
uint64_t Result = Value + Addend;
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
// Shift must be "lsl #16", in bits 22:21
assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
break;
}
case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
uint64_t Result = Value + Addend;
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffe0001fU;
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
*TargetPtr |= ((Result & 0xffffU) << 5);
// Shift must be "lsl #0", in bits 22:21.
assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
break;
}
case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
// Operation: Page(S+A) - Page(P)
uint64_t Result =
((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
// Check that -2^32 <= X < 2^32
assert(isInt<33>(Result) && "overflow check failed for relocation");
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0x9f00001fU;
// Immediate goes in bits 30:29 + 23:5 of ADRP instruction, taken
// from bits 32:12 of X.
*TargetPtr |= ((Result & 0x3000U) << (29 - 12));
*TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5));
break;
}
case ELF::R_AARCH64_LDST32_ABS_LO12_NC: {
// Operation: S + A
uint64_t Result = Value + Addend;
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffc003ffU;
// Immediate goes in bits 21:10 of LD/ST instruction, taken
// from bits 11:2 of X
*TargetPtr |= ((Result & 0xffc) << (10 - 2));
break;
}
case ELF::R_AARCH64_LDST64_ABS_LO12_NC: {
// Operation: S + A
uint64_t Result = Value + Addend;
// AArch64 code is emitted with .rela relocations. The data already in any
// bits affected by the relocation on entry is garbage.
*TargetPtr &= 0xffc003ffU;
// Immediate goes in bits 21:10 of LD/ST instruction, taken
// from bits 11:3 of X
*TargetPtr |= ((Result & 0xff8) << (10 - 3));
break;
}
}
}
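// Worked example for R_AARCH64_ADR_PREL_PG_HI21 above (illustrative): with
// S+A = 0x10012345 and P = 0x10001000, Page(S+A) = 0x10012000 and
// Page(P) = 0x10001000, so Result = 0x11000; bits 32:12 of that value
// (0x11) are what get packed into the ADRP immediate fields.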
void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
uint64_t Offset, uint32_t Value,
uint32_t Type, int32_t Addend) {
// TODO: Add Thumb relocations.
uint32_t *TargetPtr = (uint32_t *)(Section.Address + Offset);
uint32_t FinalAddress = ((Section.LoadAddress + Offset) & 0xFFFFFFFF);
Value += Addend;
DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
<< Section.Address + Offset
<< " FinalAddress: " << format("%p", FinalAddress) << " Value: "
<< format("%x", Value) << " Type: " << format("%x", Type)
<< " Addend: " << format("%x", Addend) << "\n");
switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
case ELF::R_ARM_NONE:
break;
case ELF::R_ARM_PREL31:
case ELF::R_ARM_TARGET1:
case ELF::R_ARM_ABS32:
*TargetPtr = Value;
break;
// Write one 16-bit half of the 32-bit value into the mov instruction;
// the top 4 bits of the immediate are shifted into bits 19:16.
case ELF::R_ARM_MOVW_ABS_NC:
case ELF::R_ARM_MOVT_ABS:
if (Type == ELF::R_ARM_MOVW_ABS_NC)
Value = Value & 0xFFFF;
else if (Type == ELF::R_ARM_MOVT_ABS)
Value = (Value >> 16) & 0xFFFF;
*TargetPtr &= ~0x000F0FFF;
*TargetPtr |= Value & 0xFFF;
*TargetPtr |= ((Value >> 12) & 0xF) << 16;
break;
// Write a 24-bit relative value to the branch instruction.
case ELF::R_ARM_PC24: // Fall through.
case ELF::R_ARM_CALL: // Fall through.
case ELF::R_ARM_JUMP24:
int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
RelValue = (RelValue & 0x03FFFFFC) >> 2;
assert((*TargetPtr & 0xFFFFFF) == 0xFFFFFE);
*TargetPtr &= 0xFF000000;
*TargetPtr |= RelValue;
break;
}
}
void RuntimeDyldELF::resolveMIPSRelocation(const SectionEntry &Section,
uint64_t Offset, uint32_t Value,
uint32_t Type, int32_t Addend) {
uint8_t *TargetPtr = Section.Address + Offset;
Value += Addend;
DEBUG(dbgs() << "resolveMIPSRelocation, LocalAddress: "
<< Section.Address + Offset << " FinalAddress: "
<< format("%p", Section.LoadAddress + Offset) << " Value: "
<< format("%x", Value) << " Type: " << format("%x", Type)
<< " Addend: " << format("%x", Addend) << "\n");
uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
break;
case ELF::R_MIPS_32:
writeBytesUnaligned(Value, TargetPtr, 4);
break;
case ELF::R_MIPS_26:
Insn &= 0xfc000000;
Insn |= (Value & 0x0fffffff) >> 2;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_HI16:
// Get the upper 16 bits. Also add 1 if bit 15 is 1.
Insn &= 0xffff0000;
Insn |= ((Value + 0x8000) >> 16) & 0xffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_LO16:
Insn &= 0xffff0000;
Insn |= Value & 0xffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_PC32: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
writeBytesUnaligned(Value - FinalAddress, (uint8_t *)TargetPtr, 4);
break;
}
case ELF::R_MIPS_PC16: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
Insn &= 0xffff0000;
Insn |= ((Value - FinalAddress) >> 2) & 0xffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
case ELF::R_MIPS_PC19_S2: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
Insn &= 0xfff80000;
Insn |= ((Value - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
case ELF::R_MIPS_PC21_S2: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
Insn &= 0xffe00000;
Insn |= ((Value - FinalAddress) >> 2) & 0x1fffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
case ELF::R_MIPS_PC26_S2: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
Insn &= 0xfc000000;
Insn |= ((Value - FinalAddress) >> 2) & 0x3ffffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
case ELF::R_MIPS_PCHI16: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
Insn &= 0xffff0000;
Insn |= ((Value - FinalAddress + 0x8000) >> 16) & 0xffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
case ELF::R_MIPS_PCLO16: {
uint32_t FinalAddress = (Section.LoadAddress + Offset);
Insn &= 0xffff0000;
Insn |= (Value - FinalAddress) & 0xffff;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
}
}
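// Worked example for the R_MIPS_HI16/R_MIPS_LO16 pair above (illustrative):
// for Value = 0x12348001, LO16 stores 0x8001, which the hardware
// sign-extends to -0x7fff; HI16 therefore stores
// ((0x12348001 + 0x8000) >> 16) & 0xffff = 0x1235, and
// (0x1235 << 16) + (-0x7fff) = 0x12348001 reconstitutes the value.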
void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
if (Arch == Triple::UnknownArch ||
!StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
IsMipsO32ABI = false;
IsMipsN64ABI = false;
return;
}
unsigned AbiVariant;
Obj.getPlatformFlags(AbiVariant);
IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
IsMipsN64ABI = Obj.getFileFormatName().equals("ELF64-mips");
if (AbiVariant & ELF::EF_MIPS_ABI2)
llvm_unreachable("Mips N32 ABI is not supported yet");
}
void RuntimeDyldELF::resolveMIPS64Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend,
uint64_t SymOffset,
SID SectionID) {
uint32_t r_type = Type & 0xff;
uint32_t r_type2 = (Type >> 8) & 0xff;
uint32_t r_type3 = (Type >> 16) & 0xff;
// RelType keeps track of which relocation type we are currently applying.
uint32_t RelType = r_type;
int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
RelType, Addend,
SymOffset, SectionID);
if (r_type2 != ELF::R_MIPS_NONE) {
RelType = r_type2;
CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
CalculatedValue, SymOffset,
SectionID);
}
if (r_type3 != ELF::R_MIPS_NONE) {
RelType = r_type3;
CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
CalculatedValue, SymOffset,
SectionID);
}
applyMIPS64Relocation(Section.Address + Offset, CalculatedValue, RelType);
}
int64_t
RuntimeDyldELF::evaluateMIPS64Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend,
uint64_t SymOffset, SID SectionID) {
DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
<< format("%llx", Section.Address + Offset)
<< " FinalAddress: 0x"
<< format("%llx", Section.LoadAddress + Offset)
<< " Value: 0x" << format("%llx", Value) << " Type: 0x"
<< format("%x", Type) << " Addend: 0x" << format("%llx", Addend)
<< " SymOffset: " << format("%x", SymOffset)
<< "\n");
switch (Type) {
default:
llvm_unreachable("Not implemented relocation type!");
break;
case ELF::R_MIPS_JALR:
case ELF::R_MIPS_NONE:
break;
case ELF::R_MIPS_32:
case ELF::R_MIPS_64:
return Value + Addend;
case ELF::R_MIPS_26:
return ((Value + Addend) >> 2) & 0x3ffffff;
case ELF::R_MIPS_GPREL16: {
uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
return Value + Addend - (GOTAddr + 0x7ff0);
}
case ELF::R_MIPS_SUB:
return Value - Addend;
case ELF::R_MIPS_HI16:
// Get the upper 16 bits. Also add 1 if bit 15 is 1.
return ((Value + Addend + 0x8000) >> 16) & 0xffff;
case ELF::R_MIPS_LO16:
return (Value + Addend) & 0xffff;
case ELF::R_MIPS_CALL16:
case ELF::R_MIPS_GOT_DISP:
case ELF::R_MIPS_GOT_PAGE: {
uint8_t *LocalGOTAddr =
getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, 8);
Value += Addend;
if (Type == ELF::R_MIPS_GOT_PAGE)
Value = (Value + 0x8000) & ~0xffff;
if (GOTEntry)
assert(GOTEntry == Value &&
"GOT entry has two different addresses.");
else
writeBytesUnaligned(Value, LocalGOTAddr, 8);
return (SymOffset - 0x7ff0) & 0xffff;
}
case ELF::R_MIPS_GOT_OFST: {
int64_t page = (Value + Addend + 0x8000) & ~0xffff;
return (Value + Addend - page) & 0xffff;
}
case ELF::R_MIPS_GPREL32: {
uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
return Value + Addend - (GOTAddr + 0x7ff0);
}
case ELF::R_MIPS_PC16: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
}
case ELF::R_MIPS_PC32: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return Value + Addend - FinalAddress;
}
case ELF::R_MIPS_PC18_S3: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return ((Value + Addend - ((FinalAddress | 7) ^ 7)) >> 3) & 0x3ffff;
}
case ELF::R_MIPS_PC19_S2: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return ((Value + Addend - FinalAddress) >> 2) & 0x7ffff;
}
case ELF::R_MIPS_PC21_S2: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
}
case ELF::R_MIPS_PC26_S2: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
}
case ELF::R_MIPS_PCHI16: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
}
case ELF::R_MIPS_PCLO16: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
return (Value + Addend - FinalAddress) & 0xffff;
}
}
return 0;
}
void RuntimeDyldELF::applyMIPS64Relocation(uint8_t *TargetPtr,
int64_t CalculatedValue,
uint32_t Type) {
uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
switch (Type) {
default:
break;
case ELF::R_MIPS_32:
case ELF::R_MIPS_GPREL32:
case ELF::R_MIPS_PC32:
writeBytesUnaligned(CalculatedValue & 0xffffffff, TargetPtr, 4);
break;
case ELF::R_MIPS_64:
case ELF::R_MIPS_SUB:
writeBytesUnaligned(CalculatedValue, TargetPtr, 8);
break;
case ELF::R_MIPS_26:
case ELF::R_MIPS_PC26_S2:
Insn = (Insn & 0xfc000000) | CalculatedValue;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_GPREL16:
Insn = (Insn & 0xffff0000) | (CalculatedValue & 0xffff);
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_HI16:
case ELF::R_MIPS_LO16:
case ELF::R_MIPS_PCHI16:
case ELF::R_MIPS_PCLO16:
case ELF::R_MIPS_PC16:
case ELF::R_MIPS_CALL16:
case ELF::R_MIPS_GOT_DISP:
case ELF::R_MIPS_GOT_PAGE:
case ELF::R_MIPS_GOT_OFST:
Insn = (Insn & 0xffff0000) | CalculatedValue;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_PC18_S3:
Insn = (Insn & 0xfffc0000) | CalculatedValue;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_PC19_S2:
Insn = (Insn & 0xfff80000) | CalculatedValue;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
case ELF::R_MIPS_PC21_S2:
Insn = (Insn & 0xffe00000) | CalculatedValue;
writeBytesUnaligned(Insn, TargetPtr, 4);
break;
}
}
// Return the .TOC. section and offset.
void RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel) {
// Set a default SectionID in case we do not find a TOC section below.
// This may happen for references to the TOC base (sym@toc, .opd
// relocation) without a .toc directive. In this case just use the
// first section (which is usually the .opd) since the code won't
// reference the .toc base directly.
Rel.SymbolName = NULL;
Rel.SectionID = 0;
// The TOC consists of sections .got, .toc, .tocbss, .plt in that
// order. The TOC starts where the first of these sections starts.
  for (auto &Section : Obj.sections()) {
StringRef SectionName;
check(Section.getName(SectionName));
if (SectionName == ".got"
|| SectionName == ".toc"
|| SectionName == ".tocbss"
|| SectionName == ".plt") {
Rel.SectionID = findOrEmitSection(Obj, Section, false, LocalSections);
break;
}
}
  // Per the ppc64-elf-linux ABI, the TOC base is the address of the first
  // TOC section plus 0x8000, permitting a full 64 KB segment to be addressed
  // with signed 16-bit offsets.
Rel.Addend = 0x8000;
}
// Returns the section and offset associated with the OPD entry referenced
// by Symbol.
void RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
ObjSectionToIDMap &LocalSections,
RelocationValueRef &Rel) {
// Get the ELF symbol value (st_value) to compare with Relocation offset in
// .opd entries
for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
si != se; ++si) {
section_iterator RelSecI = si->getRelocatedSection();
if (RelSecI == Obj.section_end())
continue;
StringRef RelSectionName;
check(RelSecI->getName(RelSectionName));
if (RelSectionName != ".opd")
continue;
for (elf_relocation_iterator i = si->relocation_begin(),
e = si->relocation_end();
i != e;) {
// The R_PPC64_ADDR64 relocation indicates the first field
// of a .opd entry
uint64_t TypeFunc = i->getType();
if (TypeFunc != ELF::R_PPC64_ADDR64) {
++i;
continue;
}
uint64_t TargetSymbolOffset = i->getOffset();
symbol_iterator TargetSymbol = i->getSymbol();
ErrorOr<int64_t> AddendOrErr = i->getAddend();
Check(AddendOrErr.getError());
int64_t Addend = *AddendOrErr;
++i;
if (i == e)
break;
      // Check whether the following relocation is an R_PPC64_TOC.
uint64_t TypeTOC = i->getType();
if (TypeTOC != ELF::R_PPC64_TOC)
continue;
      // Finally, compare the symbol value with the target symbol offset to
      // check whether this .opd entry refers to the symbol the relocation
      // points to.
if (Rel.Addend != (int64_t)TargetSymbolOffset)
continue;
section_iterator tsi(Obj.section_end());
check(TargetSymbol->getSection(tsi));
bool IsCode = tsi->isText();
Rel.SectionID = findOrEmitSection(Obj, (*tsi), IsCode, LocalSections);
Rel.Addend = (intptr_t)Addend;
return;
}
}
llvm_unreachable("Attempting to get address of ODP entry!");
}
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1 (Relocation Types) of the 64-bit
// PowerPC ELF ABI document.
static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
static inline uint16_t applyPPChi(uint64_t value) {
return (value >> 16) & 0xffff;
}
static inline uint16_t applyPPCha(uint64_t value) {
return ((value + 0x8000) >> 16) & 0xffff;
}
static inline uint16_t applyPPChigher(uint64_t value) {
return (value >> 32) & 0xffff;
}
static inline uint16_t applyPPChighera(uint64_t value) {
return ((value + 0x8000) >> 32) & 0xffff;
}
static inline uint16_t applyPPChighest(uint64_t value) {
return (value >> 48) & 0xffff;
}
static inline uint16_t applyPPChighesta(uint64_t value) {
return ((value + 0x8000) >> 48) & 0xffff;
}
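// For example, materializing 0x12348000 with a lis/addi pair: #lo yields
// 0x8000, which sign-extends to -0x8000 in the addi, so #ha yields 0x1235
// (not #hi's 0x1234); 0x12350000 + (-0x8000) recovers the original value.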
void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend) {
uint8_t *LocalAddress = Section.Address + Offset;
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_PPC64_ADDR16:
writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
break;
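  // DS-form instructions encode only a 14-bit displacement; the low two
  // bits must be zero, hence the ~3 mask below.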
case ELF::R_PPC64_ADDR16_DS:
writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
break;
case ELF::R_PPC64_ADDR16_LO:
writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
break;
case ELF::R_PPC64_ADDR16_LO_DS:
writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
break;
case ELF::R_PPC64_ADDR16_HI:
writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
break;
case ELF::R_PPC64_ADDR16_HA:
writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
break;
case ELF::R_PPC64_ADDR16_HIGHER:
writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
break;
case ELF::R_PPC64_ADDR16_HIGHERA:
writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
break;
case ELF::R_PPC64_ADDR16_HIGHEST:
writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
break;
case ELF::R_PPC64_ADDR16_HIGHESTA:
writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
break;
case ELF::R_PPC64_ADDR14: {
assert(((Value + Addend) & 3) == 0);
// Preserve the AA/LK bits in the branch instruction
uint8_t aalk = *(LocalAddress + 3);
writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
} break;
case ELF::R_PPC64_REL16_LO: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
uint64_t Delta = Value - FinalAddress + Addend;
writeInt16BE(LocalAddress, applyPPClo(Delta));
} break;
case ELF::R_PPC64_REL16_HI: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
uint64_t Delta = Value - FinalAddress + Addend;
writeInt16BE(LocalAddress, applyPPChi(Delta));
} break;
case ELF::R_PPC64_REL16_HA: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
uint64_t Delta = Value - FinalAddress + Addend;
writeInt16BE(LocalAddress, applyPPCha(Delta));
} break;
case ELF::R_PPC64_ADDR32: {
int32_t Result = static_cast<int32_t>(Value + Addend);
if (SignExtend32<32>(Result) != Result)
llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
writeInt32BE(LocalAddress, Result);
} break;
case ELF::R_PPC64_REL24: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
if (SignExtend32<24>(delta) != delta)
llvm_unreachable("Relocation R_PPC64_REL24 overflow");
// Generates a 'bl <address>' instruction
writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC));
} break;
case ELF::R_PPC64_REL32: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
int32_t delta = static_cast<int32_t>(Value - FinalAddress + Addend);
if (SignExtend32<32>(delta) != delta)
llvm_unreachable("Relocation R_PPC64_REL32 overflow");
writeInt32BE(LocalAddress, delta);
} break;
case ELF::R_PPC64_REL64: {
uint64_t FinalAddress = (Section.LoadAddress + Offset);
uint64_t Delta = Value - FinalAddress + Addend;
writeInt64BE(LocalAddress, Delta);
} break;
case ELF::R_PPC64_ADDR64:
writeInt64BE(LocalAddress, Value + Addend);
break;
}
}
void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend) {
uint8_t *LocalAddress = Section.Address + Offset;
switch (Type) {
default:
llvm_unreachable("Relocation type not implemented yet!");
break;
case ELF::R_390_PC16DBL:
case ELF::R_390_PLT16DBL: {
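    // The *DBL relocations hold the PC-relative offset in units of 2-byte
    // halfwords, hence the division by 2 below.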
int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
writeInt16BE(LocalAddress, Delta / 2);
break;
}
case ELF::R_390_PC32DBL:
case ELF::R_390_PLT32DBL: {
int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
writeInt32BE(LocalAddress, Delta / 2);
break;
}
case ELF::R_390_PC32: {
int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
writeInt32BE(LocalAddress, Delta);
break;
}
case ELF::R_390_64:
writeInt64BE(LocalAddress, Value + Addend);
break;
}
}
// The target location for the relocation is described by RE.SectionID and
// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
// SectionEntry has three members describing its location.
// SectionEntry::Address is the address at which the section has been loaded
// into memory in the current (host) process. SectionEntry::LoadAddress is the
// address that the section will have in the target process.
// SectionEntry::ObjAddress is the address of the bits for this section in the
// original emitted object image (also in the current address space).
//
// Relocations will be applied as if the section were loaded at
// SectionEntry::LoadAddress, but they will be applied at an address based
// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
// Target memory contents if they are required for value calculations.
//
// The Value parameter here is the load address of the symbol for the
// relocation to be applied. For relocations which refer to symbols in the
// current object Value will be the LoadAddress of the section in which
// the symbol resides (RE.Addend provides additional information about the
// symbol location). For external symbols, Value will be the address of the
// symbol in the target address space.
void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
uint64_t Value) {
const SectionEntry &Section = Sections[RE.SectionID];
return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
RE.SymOffset, RE.SectionID);
}
void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
uint64_t Offset, uint64_t Value,
uint32_t Type, int64_t Addend,
uint64_t SymOffset, SID SectionID) {
switch (Arch) {
case Triple::x86_64:
resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
break;
case Triple::x86:
resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
case Triple::aarch64:
case Triple::aarch64_be:
resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
break;
case Triple::arm: // Fall through.
case Triple::armeb:
case Triple::thumb:
case Triple::thumbeb:
resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
(uint32_t)(Addend & 0xffffffffL));
break;
case Triple::mips: // Fall through.
case Triple::mipsel:
case Triple::mips64:
case Triple::mips64el:
if (IsMipsO32ABI)
resolveMIPSRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL),
Type, (uint32_t)(Addend & 0xffffffffL));
else if (IsMipsN64ABI)
resolveMIPS64Relocation(Section, Offset, Value, Type, Addend, SymOffset,
SectionID);
else
llvm_unreachable("Mips ABI not handled");
break;
case Triple::ppc64: // Fall through.
case Triple::ppc64le:
resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
break;
case Triple::systemz:
resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
break;
default:
llvm_unreachable("Unsupported CPU type!");
}
}
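// For REL-format targets (x86, ARM, MIPS O32) the implicit addend lives in
// the instruction bytes themselves; read it from the original object image
// (ObjAddress), which still holds the unrelocated bits.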
void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID,
                                                uint64_t Offset) const {
return (void*)(Sections[SectionID].ObjAddress + Offset);
}
void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID,
                                             uint64_t Offset, unsigned RelType,
                                             RelocationValueRef Value) {
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
}
relocation_iterator RuntimeDyldELF::processRelocationRef(
unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
const auto &Obj = cast<ELFObjectFileBase>(O);
uint64_t RelType = RelI->getType();
ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend();
int64_t Addend = AddendOrErr ? *AddendOrErr : 0;
elf_symbol_iterator Symbol = RelI->getSymbol();
// Obtain the symbol name which is referenced in the relocation
StringRef TargetName;
if (Symbol != Obj.symbol_end()) {
ErrorOr<StringRef> TargetNameOrErr = Symbol->getName();
if (std::error_code EC = TargetNameOrErr.getError())
report_fatal_error(EC.message());
TargetName = *TargetNameOrErr;
}
DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
<< " TargetName: " << TargetName << "\n");
RelocationValueRef Value;
// First search for the symbol in the local symbol table
SymbolRef::Type SymType = SymbolRef::ST_Unknown;
// Search for the symbol in the global symbol table
RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
if (Symbol != Obj.symbol_end()) {
gsi = GlobalSymbolTable.find(TargetName.data());
SymType = Symbol->getType();
}
if (gsi != GlobalSymbolTable.end()) {
const auto &SymInfo = gsi->second;
Value.SectionID = SymInfo.getSectionID();
Value.Offset = SymInfo.getOffset();
Value.Addend = SymInfo.getOffset() + Addend;
} else {
switch (SymType) {
case SymbolRef::ST_Debug: {
      // TODO: Currently ELF SymbolRef::ST_Debug corresponds to STT_SECTION.
      // This mapping is not obvious and may be changed by other developers.
      // The cleanest fix would be to add a new symbol type ST_Section to
      // SymbolRef and use it here.
section_iterator si(Obj.section_end());
Symbol->getSection(si);
if (si == Obj.section_end())
llvm_unreachable("Symbol section not found, bad object file format!");
DEBUG(dbgs() << "\t\tThis is section symbol\n");
bool isCode = si->isText();
Value.SectionID = findOrEmitSection(Obj, (*si), isCode, ObjSectionToID);
Value.Addend = Addend;
break;
}
case SymbolRef::ST_Data:
case SymbolRef::ST_Unknown: {
Value.SymbolName = TargetName.data();
Value.Addend = Addend;
// Absolute relocations will have a zero symbol ID (STN_UNDEF), which
// will manifest here as a NULL symbol name.
// We can set this as a valid (but empty) symbol name, and rely
// on addRelocationForSymbol to handle this.
if (!Value.SymbolName)
Value.SymbolName = "";
break;
}
default:
llvm_unreachable("Unresolved symbol type!");
break;
}
}
uint64_t Offset = RelI->getOffset();
DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
<< "\n");
if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be) &&
(RelType == ELF::R_AARCH64_CALL26 || RelType == ELF::R_AARCH64_JUMP26)) {
// This is an AArch64 branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
SectionEntry &Section = Sections[SectionID];
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr =
createStubFunction(Section.Address + Section.StubOffset);
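      // The AArch64 stub emitted by createStubFunction materializes the full
      // 64-bit target address with a movz/movk sequence (16 bits at a time)
      // and then branches to it; the four relocations below patch those
      // immediates.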
RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.Address,
ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
RelocationEntry REmovk_g2(SectionID, StubTargetAddr - Section.Address + 4,
ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
RelocationEntry REmovk_g1(SectionID, StubTargetAddr - Section.Address + 8,
ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
RelocationEntry REmovk_g0(SectionID,
StubTargetAddr - Section.Address + 12,
ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
if (Value.SymbolName) {
addRelocationForSymbol(REmovz_g3, Value.SymbolName);
addRelocationForSymbol(REmovk_g2, Value.SymbolName);
addRelocationForSymbol(REmovk_g1, Value.SymbolName);
addRelocationForSymbol(REmovk_g0, Value.SymbolName);
} else {
addRelocationForSection(REmovz_g3, Value.SectionID);
addRelocationForSection(REmovk_g2, Value.SectionID);
addRelocationForSection(REmovk_g1, Value.SectionID);
addRelocationForSection(REmovk_g0, Value.SectionID);
}
resolveRelocation(Section, Offset,
(uint64_t)Section.Address + Section.StubOffset, RelType,
0);
Section.StubOffset += getMaxStubSize();
}
} else if (Arch == Triple::arm) {
if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
RelType == ELF::R_ARM_JUMP24) {
// This is an ARM branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
SectionEntry &Section = Sections[SectionID];
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
resolveRelocation(Section, Offset, (uint64_t)Section.Address + i->second,
RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr =
createStubFunction(Section.Address + Section.StubOffset);
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
ELF::R_ARM_ABS32, Value.Addend);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
resolveRelocation(Section, Offset,
(uint64_t)Section.Address + Section.StubOffset, RelType,
0);
Section.StubOffset += getMaxStubSize();
}
} else {
uint32_t *Placeholder =
reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
RelType == ELF::R_ARM_ABS32) {
Value.Addend += *Placeholder;
} else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
        // See the "ELF for the ARM Architecture" documentation: the
        // MOVW/MOVT addend is split across the instruction's imm4
        // (bits 19:16) and imm12 (bits 11:0) fields.
Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
}
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
} else if (IsMipsO32ABI) {
uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
computePlaceholderAddress(SectionID, Offset));
uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
if (RelType == ELF::R_MIPS_26) {
// This is an Mips branch relocation, need to use a stub function.
DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
SectionEntry &Section = Sections[SectionID];
// Extract the addend from the instruction.
      // We shift up by two since Value will be shifted back down by two
      // when the relocation is applied.
uint32_t Addend = (Opcode & 0x03ffffff) << 2;
Value.Addend += Addend;
      // Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
RelocationEntry RE(SectionID, Offset, RelType, i->second);
addRelocationForSection(RE, SectionID);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr =
createStubFunction(Section.Address + Section.StubOffset);
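        // The MIPS stub emitted by createStubFunction materializes the
        // target address with a lui/addiu (%hi/%lo) pair and jumps to it;
        // the HI16/LO16 relocations below patch those two instructions.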
// Creating Hi and Lo relocations for the filled stub instructions.
RelocationEntry REHi(SectionID, StubTargetAddr - Section.Address,
ELF::R_MIPS_HI16, Value.Addend);
RelocationEntry RELo(SectionID, StubTargetAddr - Section.Address + 4,
ELF::R_MIPS_LO16, Value.Addend);
if (Value.SymbolName) {
addRelocationForSymbol(REHi, Value.SymbolName);
addRelocationForSymbol(RELo, Value.SymbolName);
}
else {
addRelocationForSection(REHi, Value.SectionID);
addRelocationForSection(RELo, Value.SectionID);
}
RelocationEntry RE(SectionID, Offset, RelType, Section.StubOffset);
addRelocationForSection(RE, SectionID);
Section.StubOffset += getMaxStubSize();
}
} else {
// FIXME: Calculate correct addends for R_MIPS_HI16, R_MIPS_LO16,
// R_MIPS_PCHI16 and R_MIPS_PCLO16 relocations.
if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16)
Value.Addend += (Opcode & 0x0000ffff) << 16;
else if (RelType == ELF::R_MIPS_LO16)
Value.Addend += (Opcode & 0x0000ffff);
else if (RelType == ELF::R_MIPS_32)
Value.Addend += Opcode;
else if (RelType == ELF::R_MIPS_PCLO16)
Value.Addend += SignExtend32<16>((Opcode & 0x0000ffff));
else if (RelType == ELF::R_MIPS_PC16)
Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
else if (RelType == ELF::R_MIPS_PC19_S2)
Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
else if (RelType == ELF::R_MIPS_PC21_S2)
Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
else if (RelType == ELF::R_MIPS_PC26_S2)
Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
} else if (IsMipsN64ABI) {
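    // The N64 ABI packs up to three relocation types (r_type, r_type2,
    // r_type3) into r_info; the primary type is in the low byte.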
uint32_t r_type = RelType & 0xff;
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
|| r_type == ELF::R_MIPS_GOT_DISP) {
StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
if (i != GOTSymbolOffsets.end())
RE.SymOffset = i->second;
else {
RE.SymOffset = allocateGOTEntries(SectionID, 1);
GOTSymbolOffsets[TargetName] = RE.SymOffset;
}
}
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
if (RelType == ELF::R_PPC64_REL24) {
// Determine ABI variant in use for this object.
unsigned AbiVariant;
Obj.getPlatformFlags(AbiVariant);
AbiVariant &= ELF::EF_PPC64_ABI;
      // A PPC branch relocation will need a stub function if the target is
      // an external symbol (SymbolRef::ST_Unknown) or if the target address
      // is out of the range of a signed 24-bit branch.
SectionEntry &Section = Sections[SectionID];
uint8_t *Target = Section.Address + Offset;
bool RangeOverflow = false;
if (SymType != SymbolRef::ST_Unknown) {
if (AbiVariant != 2) {
// In the ELFv1 ABI, a function call may point to the .opd entry,
// so the final symbol value is calculated based on the relocation
// values in the .opd section.
findOPDEntrySection(Obj, ObjSectionToID, Value);
} else {
// In the ELFv2 ABI, a function symbol may provide a local entry
// point, which must be used for direct calls.
uint8_t SymOther = Symbol->getOther();
Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
}
uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend;
int32_t delta = static_cast<int32_t>(Target - RelocTarget);
// If it is within 24-bits branch range, just set the branch target
if (SignExtend32<24>(delta) == delta) {
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
} else {
RangeOverflow = true;
}
}
if (SymType == SymbolRef::ST_Unknown || RangeOverflow) {
      // It is an external symbol (SymbolRef::ST_Unknown) or out of the
      // signed 24-bit branch range.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
// Symbol function stub already created, just relocate to it
resolveRelocation(Section, Offset,
(uint64_t)Section.Address + i->second, RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr =
createStubFunction(Section.Address + Section.StubOffset,
AbiVariant);
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
ELF::R_PPC64_ADDR64, Value.Addend);
        // Generate the 64-bit address load sequence exemplified in section
        // 4.5.1 of the PPC64 ELF ABI. The relocations apply to the low
        // halfword of each instruction, so for big-endian targets the
        // offset must be advanced by two bytes.
uint64_t StubRelocOffset = StubTargetAddr - Section.Address;
if (!IsTargetLittleEndian)
StubRelocOffset += 2;
RelocationEntry REhst(SectionID, StubRelocOffset + 0,
ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
RelocationEntry REhr(SectionID, StubRelocOffset + 4,
ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
RelocationEntry REh(SectionID, StubRelocOffset + 12,
ELF::R_PPC64_ADDR16_HI, Value.Addend);
RelocationEntry REl(SectionID, StubRelocOffset + 16,
ELF::R_PPC64_ADDR16_LO, Value.Addend);
if (Value.SymbolName) {
addRelocationForSymbol(REhst, Value.SymbolName);
addRelocationForSymbol(REhr, Value.SymbolName);
addRelocationForSymbol(REh, Value.SymbolName);
addRelocationForSymbol(REl, Value.SymbolName);
} else {
addRelocationForSection(REhst, Value.SectionID);
addRelocationForSection(REhr, Value.SectionID);
addRelocationForSection(REh, Value.SectionID);
addRelocationForSection(REl, Value.SectionID);
}
resolveRelocation(Section, Offset,
(uint64_t)Section.Address + Section.StubOffset,
RelType, 0);
Section.StubOffset += getMaxStubSize();
}
if (SymType == SymbolRef::ST_Unknown) {
// Restore the TOC for external calls
if (AbiVariant == 2)
          writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
else
writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
}
}
} else if (RelType == ELF::R_PPC64_TOC16 ||
RelType == ELF::R_PPC64_TOC16_DS ||
RelType == ELF::R_PPC64_TOC16_LO ||
RelType == ELF::R_PPC64_TOC16_LO_DS ||
RelType == ELF::R_PPC64_TOC16_HI ||
RelType == ELF::R_PPC64_TOC16_HA) {
// These relocations are supposed to subtract the TOC address from
// the final value. This does not fit cleanly into the RuntimeDyld
// scheme, since there may be *two* sections involved in determining
      // the relocation value (the section of the symbol referred to by the
// relocation, and the TOC section associated with the current module).
//
      // Fortunately, these relocations are currently only ever generated
      // referring to symbols that themselves reside in the TOC, which means
// that the two sections are actually the same. Thus they cancel out
// and we can immediately resolve the relocation right now.
switch (RelType) {
case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
default: llvm_unreachable("Wrong relocation type.");
}
RelocationValueRef TOCValue;
findPPC64TOCSection(Obj, ObjSectionToID, TOCValue);
if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
llvm_unreachable("Unsupported TOC relocation.");
Value.Addend -= TOCValue.Addend;
resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
} else {
// There are two ways to refer to the TOC address directly: either
// via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
// ignored), or via any relocation that refers to the magic ".TOC."
// symbols (in which case the addend is respected).
if (RelType == ELF::R_PPC64_TOC) {
RelType = ELF::R_PPC64_ADDR64;
findPPC64TOCSection(Obj, ObjSectionToID, Value);
} else if (TargetName == ".TOC.") {
findPPC64TOCSection(Obj, ObjSectionToID, Value);
Value.Addend += Addend;
}
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
}
} else if (Arch == Triple::systemz &&
(RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
// Create function stubs for both PLT and GOT references, regardless of
// whether the GOT reference is to data or code. The stub contains the
// full address of the symbol, as needed by GOT references, and the
// executable part only adds an overhead of 8 bytes.
//
// We could try to conserve space by allocating the code and data
// parts of the stub separately. However, as things stand, we allocate
// a stub for every relocation, so using a GOT in JIT code should be
// no less space efficient than using an explicit constant pool.
DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
SectionEntry &Section = Sections[SectionID];
// Look for an existing stub.
StubMap::const_iterator i = Stubs.find(Value);
uintptr_t StubAddress;
if (i != Stubs.end()) {
StubAddress = uintptr_t(Section.Address) + i->second;
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function.
DEBUG(dbgs() << " Create a new stub function\n");
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
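      // Round the stub address up to the required alignment; masking with
      // -StubAlignment clears the low bits (StubAlignment is a power of 2).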
StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
-StubAlignment;
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
createStubFunction((uint8_t *)StubAddress);
RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
Value.Offset);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
Section.StubOffset = StubOffset + getMaxStubSize();
}
if (RelType == ELF::R_390_GOTENT)
resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
Addend);
else
resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
} else if (Arch == Triple::x86_64) {
if (RelType == ELF::R_X86_64_PLT32) {
      // The way the PLT relocations normally work is that the linker
      // allocates the PLT and this relocation makes a PC-relative call into
      // the PLT. The PLT entry will then jump to an address provided by the
      // GOT. On first call, the GOT address will point back into PLT code
      // that resolves the symbol. After the first call, the GOT entry points
      // to the actual function.
//
// For local functions we're ignoring all of that here and just replacing
// the PLT32 relocation type with PC32, which will translate the relocation
// into a PC-relative call directly to the function. For external symbols we
// can't be sure the function will be within 2^32 bytes of the call site, so
// we need to create a stub, which calls into the GOT. This case is
// equivalent to the usual PLT implementation except that we use the stub
// mechanism in RuntimeDyld (which puts stubs at the end of the section)
// rather than allocating a PLT section.
if (Value.SymbolName) {
// This is a call to an external function.
// Look for an existing stub.
SectionEntry &Section = Sections[SectionID];
StubMap::const_iterator i = Stubs.find(Value);
uintptr_t StubAddress;
if (i != Stubs.end()) {
StubAddress = uintptr_t(Section.Address) + i->second;
DEBUG(dbgs() << " Stub function found\n");
} else {
// Create a new stub function (equivalent to a PLT entry).
DEBUG(dbgs() << " Create a new stub function\n");
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
StubAddress = (BaseAddress + Section.StubOffset + StubAlignment - 1) &
-StubAlignment;
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
createStubFunction((uint8_t *)StubAddress);
// Bump our stub offset counter
Section.StubOffset = StubOffset + getMaxStubSize();
// Allocate a GOT Entry
uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
// The load of the GOT address has an addend of -4
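        // The x86-64 stub emitted by createStubFunction is "FF 25 <disp32>",
        // i.e. jmpq *disp32(%rip): the displacement field starts 2 bytes in
        // and is relative to the end of the 6-byte instruction, hence the
        // PC32-style fixup at StubOffset + 2 with a -4 adjustment.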
resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4);
// Fill in the value of the symbol we're targeting into the GOT
addRelocationForSymbol(computeGOTOffsetRE(SectionID,GOTOffset,0,ELF::R_X86_64_64),
Value.SymbolName);
}
// Make the target call a call into the stub table.
resolveRelocation(Section, Offset, StubAddress, ELF::R_X86_64_PC32,
Addend);
} else {
RelocationEntry RE(SectionID, Offset, ELF::R_X86_64_PC32, Value.Addend,
Value.Offset);
addRelocationForSection(RE, Value.SectionID);
}
} else if (RelType == ELF::R_X86_64_GOTPCREL) {
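      // R_X86_64_GOTPCREL computes G + GOT + A - P: point the instruction
      // PC-relatively at a GOT slot that will hold the symbol's absolute
      // address.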
uint64_t GOTOffset = allocateGOTEntries(SectionID, 1);
resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend);
// Fill in the value of the symbol we're targeting into the GOT
RelocationEntry RE = computeGOTOffsetRE(SectionID, GOTOffset, Value.Offset, ELF::R_X86_64_64);
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
} else if (RelType == ELF::R_X86_64_PC32) {
Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
processSimpleRelocation(SectionID, Offset, RelType, Value);
} else if (RelType == ELF::R_X86_64_PC64) {
Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
processSimpleRelocation(SectionID, Offset, RelType, Value);
} else {
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
} else {
if (Arch == Triple::x86) {
Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
}
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
return ++RelI;
}
size_t RuntimeDyldELF::getGOTEntrySize() {
// We don't use the GOT in all of these cases, but it's essentially free
// to put them all here.
size_t Result = 0;
switch (Arch) {
case Triple::x86_64:
case Triple::aarch64:
case Triple::aarch64_be:
case Triple::ppc64:
case Triple::ppc64le:
case Triple::systemz:
Result = sizeof(uint64_t);
break;
case Triple::x86:
case Triple::arm:
case Triple::thumb:
Result = sizeof(uint32_t);
break;
case Triple::mips:
case Triple::mipsel:
case Triple::mips64:
case Triple::mips64el:
if (IsMipsO32ABI)
Result = sizeof(uint32_t);
else if (IsMipsN64ABI)
Result = sizeof(uint64_t);
else
llvm_unreachable("Mips ABI not handled");
break;
default:
llvm_unreachable("Unsupported CPU type!");
}
return Result;
}
uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned SectionID, unsigned no) {
  // The GOT section is shared by all sections in the object file.
  (void)SectionID;
if (GOTSectionID == 0) {
GOTSectionID = Sections.size();
// Reserve a section id. We'll allocate the section later
// once we know the total size
Sections.push_back(SectionEntry(".got", 0, 0, 0));
}
uint64_t StartOffset = CurrentGOTIndex * getGOTEntrySize();
CurrentGOTIndex += no;
return StartOffset;
}
void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
                                                uint64_t Offset,
                                                uint64_t GOTOffset) {
// Fill in the relative address of the GOT Entry into the stub
RelocationEntry GOTRE(SectionID, Offset, ELF::R_X86_64_PC32, GOTOffset);
addRelocationForSection(GOTRE, GOTSectionID);
}
RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(unsigned SectionID,
                                                   uint64_t GOTOffset,
                                                   uint64_t SymbolOffset,
                                                   uint32_t Type) {
  // The GOT section is shared by all sections in the object file.
  (void)SectionID;
return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
}
void RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
ObjSectionToIDMap &SectionMap) {
// If necessary, allocate the global offset table
if (GOTSectionID != 0) {
// Allocate memory for the section
size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
GOTSectionID, ".got", false);
if (!Addr)
report_fatal_error("Unable to allocate memory for GOT!");
Sections[GOTSectionID] = SectionEntry(".got", Addr, TotalSize, 0);
if (Checker)
Checker->registerSection(Obj.getFileName(), GOTSectionID);
// For now, initialize all GOT entries to zero. We'll fill them in as
// needed when GOT-based relocations are applied.
memset(Addr, 0, TotalSize);
if (IsMipsN64ABI) {
// To correctly resolve Mips GOT relocations, we need a mapping from
// object's sections to GOTs.
for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
SI != SE; ++SI) {
if (SI->relocation_begin() != SI->relocation_end()) {
section_iterator RelocatedSection = SI->getRelocatedSection();
ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
        assert(i != SectionMap.end());
SectionToGOTMap[i->second] = GOTSectionID;
}
}
GOTSymbolOffsets.clear();
}
}
// Look for and record the EH frame section.
ObjSectionToIDMap::iterator i, e;
for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
const SectionRef &Section = i->first;
StringRef Name;
Section.getName(Name);
if (Name == ".eh_frame") {
UnregisteredEHFrameSections.push_back(i->second);
break;
}
}
GOTSectionID = 0;
CurrentGOTIndex = 0;
}
bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
return Obj.isELF();
}
} // namespace llvm
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h | //===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
#include "../RuntimeDyldMachO.h"
#define DEBUG_TYPE "dyld"
namespace llvm {
class RuntimeDyldMachOI386
: public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
public:
typedef uint32_t TargetPtrT;
RuntimeDyldMachOI386(RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldMachOCRTPBase(MM, Resolver) {}
unsigned getMaxStubSize() override { return 0; }
unsigned getStubAlignment() override { return 1; }
relocation_iterator
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseObjT,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) override {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile &>(BaseObjT);
MachO::any_relocation_info RelInfo =
Obj.getRelocation(RelI->getRawDataRefImpl());
uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
if (Obj.isRelocationScattered(RelInfo)) {
if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
return processSECTDIFFRelocation(SectionID, RelI, Obj,
ObjSectionToID);
else if (RelType == MachO::GENERIC_RELOC_VANILLA)
return processI386ScatteredVANILLA(SectionID, RelI, Obj,
ObjSectionToID);
llvm_unreachable("Unhandled scattered relocation.");
}
RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
RE.Addend = memcpyAddend(RE);
RelocationValueRef Value(
getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));
// Addends for external, PC-rel relocations on i386 point back to the zero
// offset. Calculate the final offset from the relocation target instead.
// This allows us to use the same logic for both external and internal
// relocations in resolveI386RelocationRef.
// bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
// if (IsExtern && RE.IsPCRel) {
// uint64_t RelocAddr = 0;
// RelI->getAddress(RelocAddr);
// Value.Addend += RelocAddr + 4;
// }
if (RE.IsPCRel)
makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
RE.Addend = Value.Offset;
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
return ++RelI;
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
if (RE.IsPCRel) {
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
}
switch (RE.RelType) {
default:
llvm_unreachable("Invalid relocation type!");
case MachO::GENERIC_RELOC_VANILLA:
writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
break;
case MachO::GENERIC_RELOC_SECTDIFF:
case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
uint64_t SectionABase = Sections[RE.Sections.SectionA].LoadAddress;
uint64_t SectionBBase = Sections[RE.Sections.SectionB].LoadAddress;
assert((Value == SectionABase || Value == SectionBBase) &&
"Unexpected SECTDIFF relocation value.");
Value = SectionABase - SectionBBase + RE.Addend;
writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
break;
}
case MachO::GENERIC_RELOC_PB_LA_PTR:
Error("Relocation type not implemented yet!");
}
}
void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
const SectionRef &Section) {
StringRef Name;
Section.getName(Name);
if (Name == "__jump_table")
populateJumpTable(cast<MachOObjectFile>(Obj), Section, SectionID);
else if (Name == "__pointers")
populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
Section, SectionID);
}
private:
relocation_iterator
processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseObjT,
ObjSectionToIDMap &ObjSectionToID) {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile&>(BaseObjT);
MachO::any_relocation_info RE =
Obj.getRelocation(RelI->getRawDataRefImpl());
SectionEntry &Section = Sections[SectionID];
uint32_t RelocType = Obj.getAnyRelocationType(RE);
bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
unsigned Size = Obj.getAnyRelocationLength(RE);
uint64_t Offset = RelI->getOffset();
uint8_t *LocalAddress = Section.Address + Offset;
unsigned NumBytes = 1 << Size;
uint64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
++RelI;
MachO::any_relocation_info RE2 =
Obj.getRelocation(RelI->getRawDataRefImpl());
uint32_t AddrA = Obj.getScatteredRelocationValue(RE);
section_iterator SAI = getSectionByAddress(Obj, AddrA);
assert(SAI != Obj.section_end() && "Can't find section for address A");
uint64_t SectionABase = SAI->getAddress();
uint64_t SectionAOffset = AddrA - SectionABase;
SectionRef SectionA = *SAI;
bool IsCode = SectionA.isText();
uint32_t SectionAID =
findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID);
uint32_t AddrB = Obj.getScatteredRelocationValue(RE2);
section_iterator SBI = getSectionByAddress(Obj, AddrB);
assert(SBI != Obj.section_end() && "Can't find section for address B");
uint64_t SectionBBase = SBI->getAddress();
uint64_t SectionBOffset = AddrB - SectionBBase;
SectionRef SectionB = *SBI;
uint32_t SectionBID =
findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID);
// Compute the addend 'C' from the original expression 'A - B + C'.
Addend -= AddrA - AddrB;
DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
<< ", Addend: " << Addend << ", SectionA ID: " << SectionAID
<< ", SectionAOffset: " << SectionAOffset
<< ", SectionB ID: " << SectionBID
<< ", SectionBOffset: " << SectionBOffset << "\n");
RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
SectionAOffset, SectionBID, SectionBOffset,
IsPCRel, Size);
addRelocationForSection(R, SectionAID);
return ++RelI;
}
relocation_iterator processI386ScatteredVANILLA(
unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseObjT,
RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID) {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile&>(BaseObjT);
MachO::any_relocation_info RE =
Obj.getRelocation(RelI->getRawDataRefImpl());
SectionEntry &Section = Sections[SectionID];
uint32_t RelocType = Obj.getAnyRelocationType(RE);
bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
unsigned Size = Obj.getAnyRelocationLength(RE);
uint64_t Offset = RelI->getOffset();
uint8_t *LocalAddress = Section.Address + Offset;
unsigned NumBytes = 1 << Size;
int64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
unsigned SymbolBaseAddr = Obj.getScatteredRelocationValue(RE);
section_iterator TargetSI = getSectionByAddress(Obj, SymbolBaseAddr);
assert(TargetSI != Obj.section_end() && "Can't find section for symbol");
uint64_t SectionBaseAddr = TargetSI->getAddress();
SectionRef TargetSection = *TargetSI;
bool IsCode = TargetSection.isText();
uint32_t TargetSectionID =
findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID);
Addend -= SectionBaseAddr;
RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
addRelocationForSection(R, TargetSectionID);
return ++RelI;
}
// Populate stubs in __jump_table section.
void populateJumpTable(const MachOObjectFile &Obj, const SectionRef &JTSection,
unsigned JTSectionID) {
assert(!Obj.is64Bit() &&
"__jump_table section not supported in 64-bit MachO.");
MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
uint32_t JTSectionSize = Sec32.size;
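    // For a symbol-stub section, reserved1 is the index of the section's
    // first entry in the indirect symbol table and reserved2 is the size of
    // each stub entry.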
unsigned FirstIndirectSymbol = Sec32.reserved1;
unsigned JTEntrySize = Sec32.reserved2;
unsigned NumJTEntries = JTSectionSize / JTEntrySize;
uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
unsigned JTEntryOffset = 0;
assert((JTSectionSize % JTEntrySize) == 0 &&
"Jump-table section does not contain a whole number of stubs?");
for (unsigned i = 0; i < NumJTEntries; ++i) {
unsigned SymbolIndex =
Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
ErrorOr<StringRef> IndirectSymbolName = SI->getName();
if (std::error_code EC = IndirectSymbolName.getError())
report_fatal_error(EC.message());
uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
createStubFunction(JTEntryAddr);
RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
addRelocationForSymbol(RE, *IndirectSymbolName);
JTEntryOffset += JTEntrySize;
}
}
};
}
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h | //===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"
#define DEBUG_TYPE "dyld"
namespace llvm {
class RuntimeDyldMachOAArch64
: public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:
typedef uint64_t TargetPtrT;
RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldMachOCRTPBase(MM, Resolver) {}
unsigned getMaxStubSize() override { return 8; }
unsigned getStubAlignment() override { return 8; }
/// Extract the addend encoded in the instruction / memory location.
int64_t decodeAddend(const RelocationEntry &RE) const {
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
unsigned NumBytes = 1 << RE.Size;
int64_t Addend = 0;
// Verify that the relocation has the correct size and alignment.
switch (RE.RelType) {
default:
llvm_unreachable("Unsupported relocation type!");
case MachO::ARM64_RELOC_UNSIGNED:
assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
break;
case MachO::ARM64_RELOC_BRANCH26:
case MachO::ARM64_RELOC_PAGE21:
case MachO::ARM64_RELOC_PAGEOFF12:
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
assert(NumBytes == 4 && "Invalid relocation size.");
assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
"Instruction address is not aligned to 4 bytes.");
break;
}
switch (RE.RelType) {
default:
llvm_unreachable("Unsupported relocation type!");
case MachO::ARM64_RELOC_UNSIGNED:
// This could be an unaligned memory location.
if (NumBytes == 4)
Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
else
Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
break;
case MachO::ARM64_RELOC_BRANCH26: {
// Verify that the relocation points to the expected branch instruction.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
// Get the 26 bit addend encoded in the branch instruction and sign-extend
// to 64 bit. The lower 2 bits are always zeros and are therefore implicit
// (<< 2).
Addend = (*p & 0x03FFFFFF) << 2;
Addend = SignExtend64(Addend, 28);
break;
}
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
case MachO::ARM64_RELOC_PAGE21: {
// Verify that the relocation points to the expected adrp instruction.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
      // Get the 21 bit addend (immhi:immlo) encoded in the adrp instruction
      // and sign-extend to 64 bit. The lower 12 bits (4096 byte page) are
      // always zeros and are therefore implicit (<< 12). The grouping must
      // mirror encodeAddend below, which expects immlo at addend bits 13:12.
      Addend = (((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3)) << 12;
      Addend = SignExtend64(Addend, 33);
break;
}
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
// instructions.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
(void)p;
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
} // fall-through
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
// or add / sub instructions.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
assert((((*p & 0x3B000000) == 0x39000000) ||
((*p & 0x11C00000) == 0x11000000) ) &&
"Expected load / store or add/sub instruction.");
// Get the 12 bit addend encoded in the instruction.
Addend = (*p & 0x003FFC00) >> 10;
// Check which instruction we are decoding to obtain the implicit shift
// factor of the instruction.
int ImplicitShift = 0;
if ((*p & 0x3B000000) == 0x39000000) { // << load / store
// For load / store instructions the size is encoded in bits 31:30.
ImplicitShift = ((*p >> 30) & 0x3);
if (ImplicitShift == 0) {
// Check if this a vector op to get the correct shift value.
if ((*p & 0x04800000) == 0x04800000)
ImplicitShift = 4;
}
}
// Compensate for implicit shift.
Addend <<= ImplicitShift;
break;
}
}
return Addend;
}
  /// Encode the addend into the instruction / memory location.
void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
MachO::RelocationInfoType RelType, int64_t Addend) const {
// Verify that the relocation has the correct alignment.
switch (RelType) {
default:
llvm_unreachable("Unsupported relocation type!");
case MachO::ARM64_RELOC_UNSIGNED:
assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
break;
case MachO::ARM64_RELOC_BRANCH26:
case MachO::ARM64_RELOC_PAGE21:
case MachO::ARM64_RELOC_PAGEOFF12:
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
assert(NumBytes == 4 && "Invalid relocation size.");
assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
"Instruction address is not aligned to 4 bytes.");
break;
}
switch (RelType) {
default:
llvm_unreachable("Unsupported relocation type!");
case MachO::ARM64_RELOC_UNSIGNED:
// This could be an unaligned memory location.
if (NumBytes == 4)
*reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
else
*reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
break;
case MachO::ARM64_RELOC_BRANCH26: {
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
// Verify that the relocation points to the expected branch instruction.
assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");
// Verify addend value.
assert((Addend & 0x3) == 0 && "Branch target is not aligned");
assert(isInt<28>(Addend) && "Branch target is out of range.");
// Encode the addend as 26 bit immediate in the branch instruction.
*p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
break;
}
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
case MachO::ARM64_RELOC_PAGE21: {
// Verify that the relocation points to the expected adrp instruction.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
// Check that the addend fits into 21 bits (+ 12 lower bits).
assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
assert(isInt<33>(Addend) && "Invalid page reloc value.");
// Encode the addend into the instruction.
uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
*p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
break;
}
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
// instructions.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
assert((*p & 0x3B000000) == 0x39000000 &&
"Only expected load / store instructions.");
(void)p;
} // fall-through
case MachO::ARM64_RELOC_PAGEOFF12: {
// Verify that the relocation points to one of the expected load / store
// or add / sub instructions.
auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
assert((((*p & 0x3B000000) == 0x39000000) ||
((*p & 0x11C00000) == 0x11000000) ) &&
"Expected load / store or add/sub instruction.");
// Check which instruction we are decoding to obtain the implicit shift
// factor of the instruction and verify alignment.
int ImplicitShift = 0;
if ((*p & 0x3B000000) == 0x39000000) { // << load / store
// For load / store instructions the size is encoded in bits 31:30.
ImplicitShift = ((*p >> 30) & 0x3);
switch (ImplicitShift) {
case 0:
// Check if this a vector op to get the correct shift value.
if ((*p & 0x04800000) == 0x04800000) {
ImplicitShift = 4;
assert(((Addend & 0xF) == 0) &&
"128-bit LDR/STR not 16-byte aligned.");
}
break;
case 1:
assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
break;
case 2:
assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
break;
case 3:
assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
break;
}
}
// Compensate for implicit shift.
Addend >>= ImplicitShift;
assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
// Encode the addend into the instruction.
*p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
break;
}
}
}
relocation_iterator
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseObjT,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) override {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile &>(BaseObjT);
MachO::any_relocation_info RelInfo =
Obj.getRelocation(RelI->getRawDataRefImpl());
    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations not supported on AArch64.");
// ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
// addend for the following relocation. If found: (1) store the associated
// addend, (2) consume the next relocation, and (3) use the stored addend to
// override the addend.
int64_t ExplicitAddend = 0;
if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
assert(!Obj.getPlainRelocationExternal(RelInfo));
assert(!Obj.getAnyRelocationPCRel(RelInfo));
assert(Obj.getAnyRelocationLength(RelInfo) == 2);
int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
ExplicitAddend = SignExtend64(RawAddend, 24);
++RelI;
RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
}
RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
RE.Addend = decodeAddend(RE);
RelocationValueRef Value(
getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));
assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
"ARM64_RELOC_ADDEND and embedded addend in the instruction.");
if (ExplicitAddend) {
RE.Addend = ExplicitAddend;
Value.Offset = ExplicitAddend;
}
bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
if (!IsExtern && RE.IsPCRel)
makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
RE.Addend = Value.Offset;
if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
processGOTRelocation(RE, Value, Stubs);
else {
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
}
return ++RelI;
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
MachO::RelocationInfoType RelType =
static_cast<MachO::RelocationInfoType>(RE.RelType);
switch (RelType) {
default:
llvm_unreachable("Invalid relocation type!");
case MachO::ARM64_RELOC_UNSIGNED: {
assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
// Mask in the target value a byte at a time (we don't have an alignment
// guarantee for the target address, so this is safest).
if (RE.Size < 2)
llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
break;
}
case MachO::ARM64_RELOC_BRANCH26: {
assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
// Check if branch is in range.
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
int64_t PCRelVal = Value - FinalAddress + RE.Addend;
encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
break;
}
case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
case MachO::ARM64_RELOC_PAGE21: {
assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
// Adjust for PC-relative relocation and offset.
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
int64_t PCRelVal =
((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
break;
}
case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
case MachO::ARM64_RELOC_PAGEOFF12: {
assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
// Add the offset from the symbol.
Value += RE.Addend;
// Mask out the page address and only use the lower 12 bits.
Value &= 0xFFF;
encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
break;
}
case MachO::ARM64_RELOC_SUBTRACTOR:
case MachO::ARM64_RELOC_POINTER_TO_GOT:
case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
llvm_unreachable("Relocation type not yet implemented!");
case MachO::ARM64_RELOC_ADDEND:
llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
"processRelocationRef!");
}
}
void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
const SectionRef &Section) {}
private:
void processGOTRelocation(const RelocationEntry &RE,
RelocationValueRef &Value, StubMap &Stubs) {
assert(RE.Size == 2);
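// RE.Size is the log2 of the access size: 2 here means the GOT-relative
// load is 4 bytes wide, while the GOT entry emitted below uses Size 3
// (an 8-byte slot).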
SectionEntry &Section = Sections[RE.SectionID];
StubMap::const_iterator i = Stubs.find(Value);
int64_t Offset;
if (i != Stubs.end())
Offset = static_cast<int64_t>(i->second);
else {
// FIXME: There must be a better way to do this than to check and fix the
// alignment every time!!!
uintptr_t BaseAddress = uintptr_t(Section.Address);
uintptr_t StubAlignment = getStubAlignment();
uintptr_t StubAddress =
(BaseAddress + Section.StubOffset + StubAlignment - 1) &
-StubAlignment;
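// Round up to the next multiple of StubAlignment by adding Align-1 and
// masking with -Align; e.g. 0x1005 with 8-byte alignment becomes 0x1008.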
unsigned StubOffset = StubAddress - BaseAddress;
Stubs[Value] = StubOffset;
assert(((StubAddress % getStubAlignment()) == 0) &&
"GOT entry not aligned");
RelocationEntry GOTRE(RE.SectionID, StubOffset,
MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
/*IsPCRel=*/false, /*Size=*/3);
if (Value.SymbolName)
addRelocationForSymbol(GOTRE, Value.SymbolName);
else
addRelocationForSection(GOTRE, Value.SectionID);
Section.StubOffset = StubOffset + getMaxStubSize();
Offset = static_cast<int64_t>(StubOffset);
}
RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
RE.IsPCRel, RE.Size);
addRelocationForSection(TargetRE, RE.SectionID);
}
};
}
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h | //===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
#include "../RuntimeDyldMachO.h"
#define DEBUG_TYPE "dyld"
namespace llvm {
class RuntimeDyldMachOARM
: public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
private:
typedef RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> ParentT;
public:
typedef uint32_t TargetPtrT;
RuntimeDyldMachOARM(RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldMachOCRTPBase(MM, Resolver) {}
unsigned getMaxStubSize() override { return 8; }
unsigned getStubAlignment() override { return 4; }
int64_t decodeAddend(const RelocationEntry &RE) const {
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
switch (RE.RelType) {
default:
return memcpyAddend(RE);
case MachO::ARM_RELOC_BR24: {
uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
Temp &= 0x00ffffff; // Mask out the opcode.
// Now we've got the shifted immediate, shift by 2, sign extend and ret.
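// e.g. imm24 0xFFFFFE << 2 = 0x3FFFFF8, and SignExtend32<26> turns that
// into -8 (a branch to itself once the 8-byte PC bias is applied).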
return SignExtend32<26>(Temp << 2);
}
}
}
relocation_iterator
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseObjT,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) override {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile &>(BaseObjT);
MachO::any_relocation_info RelInfo =
Obj.getRelocation(RelI->getRawDataRefImpl());
uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
if (Obj.isRelocationScattered(RelInfo)) {
if (RelType == MachO::ARM_RELOC_HALF_SECTDIFF)
return processHALFSECTDIFFRelocation(SectionID, RelI, Obj,
ObjSectionToID);
else
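// Other scattered relocation kinds are assumed to arrive in pairs
// (e.g. a SECTDIFF entry followed by its PAIR entry), so advance past
// both entries here.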
return ++++RelI;
}
RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
RE.Addend = decodeAddend(RE);
RelocationValueRef Value(
getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));
if (RE.IsPCRel)
makeValueAddendPCRel(Value, RelI, 8);
if ((RE.RelType & 0xf) == MachO::ARM_RELOC_BR24)
processBranchRelocation(RE, Value, Stubs);
else {
RE.Addend = Value.Offset;
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
}
return ++RelI;
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
// If the relocation is PC-relative, the value to be encoded is the
// pointer difference.
if (RE.IsPCRel) {
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
Value -= FinalAddress;
// ARM PCRel relocations have an effective-PC offset of two instructions
// (4 bytes in Thumb mode, 8 bytes in ARM mode).
// FIXME: For now, assume ARM mode.
Value -= 8;
}
switch (RE.RelType) {
default:
llvm_unreachable("Invalid relocation type!");
case MachO::ARM_RELOC_VANILLA:
writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
break;
case MachO::ARM_RELOC_BR24: {
// Mask the value into the target address. We know instructions are
// 32-bit aligned, so we can do it all at once.
Value += RE.Addend;
// The low two bits of the value are not encoded.
Value >>= 2;
// Mask the value to 24 bits.
uint64_t FinalValue = Value & 0xffffff;
// FIXME: If the destination is a Thumb function (and the instruction
// is a non-predicated BL instruction), we need to change it to a BLX
// instruction instead.
// Insert the value into the instruction.
uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
writeBytesUnaligned((Temp & ~0xffffff) | FinalValue, LocalAddress, 4);
break;
}
case MachO::ARM_RELOC_HALF_SECTDIFF: {
uint64_t SectionABase = Sections[RE.Sections.SectionA].LoadAddress;
uint64_t SectionBBase = Sections[RE.Sections.SectionB].LoadAddress;
assert((Value == SectionABase || Value == SectionBBase) &&
"Unexpected HALFSECTDIFF relocation value.");
Value = SectionABase - SectionBBase + RE.Addend;
if (RE.Size & 0x1) // :upper16:
Value = (Value >> 16);
Value &= 0xffff;
uint32_t Insn = readBytesUnaligned(LocalAddress, 4);
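// ARM MOVW/MOVT encode a 16-bit immediate split as imm4:imm12. The splice
// below places Value bits 15:12 into instruction bits 19:16 and Value
// bits 11:0 into instruction bits 11:0.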
Insn = (Insn & 0xfff0f000) | ((Value & 0xf000) << 4) | (Value & 0x0fff);
writeBytesUnaligned(Insn, LocalAddress, 4);
break;
}
case MachO::ARM_THUMB_RELOC_BR22:
case MachO::ARM_THUMB_32BIT_BRANCH:
case MachO::ARM_RELOC_HALF:
case MachO::ARM_RELOC_PAIR:
case MachO::ARM_RELOC_SECTDIFF:
case MachO::ARM_RELOC_LOCAL_SECTDIFF:
case MachO::ARM_RELOC_PB_LA_PTR:
Error("Relocation type not implemented yet!");
return;
}
}
void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
const SectionRef &Section) {
StringRef Name;
Section.getName(Name);
if (Name == "__nl_symbol_ptr")
populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
Section, SectionID);
}
private:
void processBranchRelocation(const RelocationEntry &RE,
const RelocationValueRef &Value,
StubMap &Stubs) {
// This is an ARM branch relocation, need to use a stub function.
// Look up for existing stub.
SectionEntry &Section = Sections[RE.SectionID];
RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
uint8_t *Addr;
if (i != Stubs.end()) {
Addr = Section.Address + i->second;
} else {
// Create a new stub function.
Stubs[Value] = Section.StubOffset;
uint8_t *StubTargetAddr =
createStubFunction(Section.Address + Section.StubOffset);
RelocationEntry StubRE(RE.SectionID, StubTargetAddr - Section.Address,
MachO::GENERIC_RELOC_VANILLA, Value.Offset, false,
2);
if (Value.SymbolName)
addRelocationForSymbol(StubRE, Value.SymbolName);
else
addRelocationForSection(StubRE, Value.SectionID);
Addr = Section.Address + Section.StubOffset;
Section.StubOffset += getMaxStubSize();
}
RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, 0,
RE.IsPCRel, RE.Size);
resolveRelocation(TargetRE, (uint64_t)Addr);
}
relocation_iterator
processHALFSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseTObj,
ObjSectionToIDMap &ObjSectionToID) {
const MachOObjectFile &MachO =
static_cast<const MachOObjectFile&>(BaseTObj);
MachO::any_relocation_info RE =
MachO.getRelocation(RelI->getRawDataRefImpl());
// For a half-diff relocation the length bits actually record whether this
// is a movw/movt, and whether this is arm or thumb.
// Bit 0 indicates movw (b0 == 0) or movt (b0 == 1).
// Bit 1 indicates arm (b1 == 0) or thumb (b1 == 1).
unsigned HalfDiffKindBits = MachO.getAnyRelocationLength(RE);
if (HalfDiffKindBits & 0x2)
llvm_unreachable("Thumb not yet supported.");
SectionEntry &Section = Sections[SectionID];
uint32_t RelocType = MachO.getAnyRelocationType(RE);
bool IsPCRel = MachO.getAnyRelocationPCRel(RE);
uint64_t Offset = RelI->getOffset();
uint8_t *LocalAddress = Section.Address + Offset;
int64_t Immediate = readBytesUnaligned(LocalAddress, 4); // Copy the whole instruction out.
Immediate = ((Immediate >> 4) & 0xf000) | (Immediate & 0xfff);
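// Gather the MOVW/MOVT immediate: instruction bits 19:16 (imm4) shift
// down to value bits 15:12, and bits 11:0 (imm12) stay in place.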
++RelI;
MachO::any_relocation_info RE2 =
MachO.getRelocation(RelI->getRawDataRefImpl());
uint32_t AddrA = MachO.getScatteredRelocationValue(RE);
section_iterator SAI = getSectionByAddress(MachO, AddrA);
assert(SAI != MachO.section_end() && "Can't find section for address A");
uint64_t SectionABase = SAI->getAddress();
uint64_t SectionAOffset = AddrA - SectionABase;
SectionRef SectionA = *SAI;
bool IsCode = SectionA.isText();
uint32_t SectionAID =
findOrEmitSection(MachO, SectionA, IsCode, ObjSectionToID);
uint32_t AddrB = MachO.getScatteredRelocationValue(RE2);
section_iterator SBI = getSectionByAddress(MachO, AddrB);
assert(SBI != MachO.section_end() && "Can't find section for address B");
uint64_t SectionBBase = SBI->getAddress();
uint64_t SectionBOffset = AddrB - SectionBBase;
SectionRef SectionB = *SBI;
uint32_t SectionBID =
findOrEmitSection(MachO, SectionB, IsCode, ObjSectionToID);
uint32_t OtherHalf = MachO.getAnyRelocationAddress(RE2) & 0xffff;
unsigned Shift = (HalfDiffKindBits & 0x1) ? 16 : 0;
uint32_t FullImmVal = (Immediate << Shift) | (OtherHalf << (16 - Shift));
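// For a movt (Shift == 16) this instruction holds the upper half and the
// paired relocation's OtherHalf supplies the lower 16 bits; for a movw
// the roles are swapped.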
int64_t Addend = FullImmVal - (AddrA - AddrB);
// addend = Encoded - Expected
// = Encoded - (AddrA - AddrB)
DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA << ", AddrB: " << AddrB
<< ", Addend: " << Addend << ", SectionA ID: " << SectionAID
<< ", SectionAOffset: " << SectionAOffset
<< ", SectionB ID: " << SectionBID
<< ", SectionBOffset: " << SectionBOffset << "\n");
RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
HalfDiffKindBits);
addRelocationForSection(R, SectionAID);
addRelocationForSection(R, SectionBID);
return ++RelI;
}
};
}
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h | //===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
#include "../RuntimeDyldMachO.h"
#define DEBUG_TYPE "dyld"
namespace llvm {
class RuntimeDyldMachOX86_64
: public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
public:
typedef uint64_t TargetPtrT;
RuntimeDyldMachOX86_64(RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldMachOCRTPBase(MM, Resolver) {}
unsigned getMaxStubSize() override { return 8; }
unsigned getStubAlignment() override { return 1; }
relocation_iterator
processRelocationRef(unsigned SectionID, relocation_iterator RelI,
const ObjectFile &BaseObjT,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) override {
const MachOObjectFile &Obj =
static_cast<const MachOObjectFile &>(BaseObjT);
MachO::any_relocation_info RelInfo =
Obj.getRelocation(RelI->getRawDataRefImpl());
assert(!Obj.isRelocationScattered(RelInfo) &&
"Scattered relocations not supported on X86_64");
RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
RE.Addend = memcpyAddend(RE);
RelocationValueRef Value(
getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));
bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
if (!IsExtern && RE.IsPCRel)
makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
if (RE.RelType == MachO::X86_64_RELOC_GOT ||
RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
processGOTRelocation(RE, Value, Stubs);
else {
RE.Addend = Value.Offset;
if (Value.SymbolName)
addRelocationForSymbol(RE, Value.SymbolName);
else
addRelocationForSection(RE, Value.SectionID);
}
return ++RelI;
}
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
DEBUG(dumpRelocationToResolve(RE, Value));
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *LocalAddress = Section.Address + RE.Offset;
// If the relocation is PC-relative, the value to be encoded is the
// pointer difference.
if (RE.IsPCRel) {
// FIXME: It seems this value needs to be adjusted by 4 for an effective
// PC address. Is that expected? Only for branches, perhaps?
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
Value -= FinalAddress + 4;
}
switch (RE.RelType) {
default:
llvm_unreachable("Invalid relocation type!");
case MachO::X86_64_RELOC_SIGNED_1:
case MachO::X86_64_RELOC_SIGNED_2:
case MachO::X86_64_RELOC_SIGNED_4:
case MachO::X86_64_RELOC_SIGNED:
case MachO::X86_64_RELOC_UNSIGNED:
case MachO::X86_64_RELOC_BRANCH:
writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
break;
case MachO::X86_64_RELOC_GOT_LOAD:
case MachO::X86_64_RELOC_GOT:
case MachO::X86_64_RELOC_SUBTRACTOR:
case MachO::X86_64_RELOC_TLV:
Error("Relocation type not implemented yet!");
}
}
void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
const SectionRef &Section) {}
private:
void processGOTRelocation(const RelocationEntry &RE,
RelocationValueRef &Value, StubMap &Stubs) {
SectionEntry &Section = Sections[RE.SectionID];
assert(RE.IsPCRel);
assert(RE.Size == 2);
Value.Offset -= RE.Addend;
RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
uint8_t *Addr;
if (i != Stubs.end()) {
Addr = Section.Address + i->second;
} else {
Stubs[Value] = Section.StubOffset;
uint8_t *GOTEntry = Section.Address + Section.StubOffset;
RelocationEntry GOTRE(RE.SectionID, Section.StubOffset,
MachO::X86_64_RELOC_UNSIGNED, Value.Offset, false,
3);
if (Value.SymbolName)
addRelocationForSymbol(GOTRE, Value.SymbolName);
else
addRelocationForSection(GOTRE, Value.SectionID);
Section.StubOffset += 8;
Addr = GOTEntry;
}
RelocationEntry TargetRE(RE.SectionID, RE.Offset,
MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
resolveRelocation(TargetRE, (uint64_t)Addr);
}
};
}
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld | repos/DirectXShaderCompiler/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h | //===-- RuntimeDyldCOFFX86_64.h --- COFF/X86_64 specific code ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// COFF x86_64 support for MC-JIT runtime dynamic linker.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFX86_64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFX86_64_H
#include "llvm/Object/COFF.h"
#include "llvm/Support/COFF.h"
#include "../RuntimeDyldCOFF.h"
#define DEBUG_TYPE "dyld"
namespace llvm {
class RuntimeDyldCOFFX86_64 : public RuntimeDyldCOFF {
private:
// When a module is loaded we save the SectionID of the unwind
// sections in a table until we receive a request to register all
// unregistered EH frame sections with the memory manager.
SmallVector<SID, 2> UnregisteredEHFrameSections;
SmallVector<SID, 2> RegisteredEHFrameSections;
public:
RuntimeDyldCOFFX86_64(RuntimeDyld::MemoryManager &MM,
RuntimeDyld::SymbolResolver &Resolver)
: RuntimeDyldCOFF(MM, Resolver) {}
unsigned getMaxStubSize() override {
return 6; // 2-byte jmp instruction + 32-bit relative address
}
// The target location for the relocation is described by RE.SectionID and
// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
// SectionEntry has three members describing its location.
// SectionEntry::Address is the address at which the section has been loaded
// into memory in the current (host) process. SectionEntry::LoadAddress is
// the address that the section will have in the target process.
// SectionEntry::ObjAddress is the address of the bits for this section in the
// original emitted object image (also in the current address space).
//
// Relocations will be applied as if the section were loaded at
// SectionEntry::LoadAddress, but they will be applied at an address based
// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer
// to Target memory contents if they are required for value calculations.
//
// The Value parameter here is the load address of the symbol for the
// relocation to be applied. For relocations which refer to symbols in the
// current object Value will be the LoadAddress of the section in which
// the symbol resides (RE.Addend provides additional information about the
// symbol location). For external symbols, Value will be the address of the
// symbol in the target address space.
void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
const SectionEntry &Section = Sections[RE.SectionID];
uint8_t *Target = Section.Address + RE.Offset;
switch (RE.RelType) {
case COFF::IMAGE_REL_AMD64_REL32:
case COFF::IMAGE_REL_AMD64_REL32_1:
case COFF::IMAGE_REL_AMD64_REL32_2:
case COFF::IMAGE_REL_AMD64_REL32_3:
case COFF::IMAGE_REL_AMD64_REL32_4:
case COFF::IMAGE_REL_AMD64_REL32_5: {
uint32_t *TargetAddress = (uint32_t *)Target;
uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
// Delta is the distance from the start of the reloc to the end of the
// instruction with the reloc.
uint64_t Delta = 4 + (RE.RelType - COFF::IMAGE_REL_AMD64_REL32);
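// e.g. IMAGE_REL_AMD64_REL32_3 gives Delta = 7: the 4-byte displacement
// field plus 3 trailing instruction bytes before the next instruction.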
Value -= FinalAddress + Delta;
uint64_t Result = Value + RE.Addend;
assert(((int64_t)Result <= INT32_MAX) && "Relocation overflow");
assert(((int64_t)Result >= INT32_MIN) && "Relocation underflow");
*TargetAddress = Result;
break;
}
case COFF::IMAGE_REL_AMD64_ADDR32NB: {
// Note ADDR32NB requires a well-established notion of
// image base. This address must be less than or equal
// to every section's load address, and all sections must be
// within a 32 bit offset from the base.
//
// For now we just set these to zero.
uint32_t *TargetAddress = (uint32_t *)Target;
*TargetAddress = 0;
break;
}
case COFF::IMAGE_REL_AMD64_ADDR64: {
uint64_t *TargetAddress = (uint64_t *)Target;
*TargetAddress = Value + RE.Addend;
break;
}
default:
llvm_unreachable("Relocation type not implemented yet!");
break;
}
}
relocation_iterator processRelocationRef(unsigned SectionID,
relocation_iterator RelI,
const ObjectFile &Obj,
ObjSectionToIDMap &ObjSectionToID,
StubMap &Stubs) override {
// If possible, find the symbol referred to in the relocation,
// and the section that contains it.
symbol_iterator Symbol = RelI->getSymbol();
if (Symbol == Obj.symbol_end())
report_fatal_error("Unknown symbol in relocation");
section_iterator SecI(Obj.section_end());
Symbol->getSection(SecI);
// If there is no section, this must be an external reference.
const bool IsExtern = SecI == Obj.section_end();
// Determine the Addend used to adjust the relocation value.
uint64_t RelType = RelI->getType();
uint64_t Offset = RelI->getOffset();
uint64_t Addend = 0;
SectionEntry &Section = Sections[SectionID];
uintptr_t ObjTarget = Section.ObjAddress + Offset;
switch (RelType) {
case COFF::IMAGE_REL_AMD64_REL32:
case COFF::IMAGE_REL_AMD64_REL32_1:
case COFF::IMAGE_REL_AMD64_REL32_2:
case COFF::IMAGE_REL_AMD64_REL32_3:
case COFF::IMAGE_REL_AMD64_REL32_4:
case COFF::IMAGE_REL_AMD64_REL32_5:
case COFF::IMAGE_REL_AMD64_ADDR32NB: {
uint32_t *Displacement = (uint32_t *)ObjTarget;
Addend = *Displacement;
break;
}
case COFF::IMAGE_REL_AMD64_ADDR64: {
uint64_t *Displacement = (uint64_t *)ObjTarget;
Addend = *Displacement;
break;
}
default:
break;
}
ErrorOr<StringRef> TargetNameOrErr = Symbol->getName();
if (std::error_code EC = TargetNameOrErr.getError())
report_fatal_error(EC.message());
StringRef TargetName = *TargetNameOrErr;
DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
<< " RelType: " << RelType << " TargetName: " << TargetName
<< " Addend " << Addend << "\n");
if (IsExtern) {
RelocationEntry RE(SectionID, Offset, RelType, Addend);
addRelocationForSymbol(RE, TargetName);
} else {
bool IsCode = SecI->isText();
unsigned TargetSectionID =
findOrEmitSection(Obj, *SecI, IsCode, ObjSectionToID);
uint64_t TargetOffset = getSymbolOffset(*Symbol);
RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
addRelocationForSection(RE, TargetSectionID);
}
return ++RelI;
}
unsigned getStubAlignment() override { return 1; }
void registerEHFrames() override {
for (auto const &EHFrameSID : UnregisteredEHFrameSections) {
uint8_t *EHFrameAddr = Sections[EHFrameSID].Address;
uint64_t EHFrameLoadAddr = Sections[EHFrameSID].LoadAddress;
size_t EHFrameSize = Sections[EHFrameSID].Size;
MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
RegisteredEHFrameSections.push_back(EHFrameSID);
}
UnregisteredEHFrameSections.clear();
}
void deregisterEHFrames() override {
// Stub
}
void finalizeLoad(const ObjectFile &Obj,
ObjSectionToIDMap &SectionMap) override {
// Look for and record the EH frame section IDs.
for (const auto &SectionPair : SectionMap) {
const SectionRef &Section = SectionPair.first;
StringRef Name;
Check(Section.getName(Name));
// Note unwind info is split across .pdata and .xdata, so this
// may not be sufficiently general for all users.
if (Name == ".xdata") {
UnregisteredEHFrameSections.push_back(SectionPair.second);
}
}
}
};
} // end namespace llvm
#undef DEBUG_TYPE
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/DxilValidation/DxilValidationUtils.h | ///////////////////////////////////////////////////////////////////////////////
// //
// DxilValidationUtils.h                                                    //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This file provides utils for validating DXIL. //
// //
///////////////////////////////////////////////////////////////////////////////
#pragma once
#include "dxc/DXIL/DxilConstants.h"
#include "dxc/DXIL/DxilResourceProperties.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
using namespace llvm;
namespace llvm {
class Module;
class Function;
class DataLayout;
class Metadata;
class Value;
class GlobalVariable;
class Instruction;
class Type;
} // namespace llvm
namespace hlsl {
///////////////////////////////////////////////////////////////////////////////
// Validation rules.
#include "DxilValidation.inc"
const char *GetValidationRuleText(ValidationRule value);
class DxilEntryProps;
class DxilModule;
class DxilResourceBase;
class DxilSignatureElement;
// Save status like output write for entries.
struct EntryStatus {
bool hasOutputPosition[DXIL::kNumOutputStreams];
unsigned OutputPositionMask[DXIL::kNumOutputStreams];
std::vector<unsigned> outputCols;
std::vector<unsigned> patchConstOrPrimCols;
bool m_bCoverageIn, m_bInnerCoverageIn;
bool hasViewID;
unsigned domainLocSize;
EntryStatus(DxilEntryProps &entryProps);
};
struct ValidationContext {
bool Failed = false;
Module &M;
Module *pDebugModule;
DxilModule &DxilMod;
const Type *HandleTy;
const DataLayout &DL;
DebugLoc LastDebugLocEmit;
ValidationRule LastRuleEmit;
std::unordered_set<Function *> entryFuncCallSet;
std::unordered_set<Function *> patchConstFuncCallSet;
std::unordered_map<unsigned, bool> UavCounterIncMap;
std::unordered_map<Value *, unsigned> HandleResIndexMap;
// TODO: save resource map for each createHandle/createHandleForLib.
std::unordered_map<Value *, DxilResourceProperties> ResPropMap;
std::unordered_map<Function *, std::vector<Function *>> PatchConstantFuncMap;
std::unordered_map<Function *, std::unique_ptr<EntryStatus>> entryStatusMap;
bool isLibProfile;
const unsigned kDxilControlFlowHintMDKind;
const unsigned kDxilPreciseMDKind;
const unsigned kDxilNonUniformMDKind;
const unsigned kLLVMLoopMDKind;
unsigned m_DxilMajor, m_DxilMinor;
ModuleSlotTracker slotTracker;
std::unique_ptr<CallGraph> pCallGraph;
ValidationContext(Module &llvmModule, Module *DebugModule,
DxilModule &dxilModule);
void PropagateResMap(Value *V, DxilResourceBase *Res);
void BuildResMap();
bool HasEntryStatus(Function *F);
EntryStatus &GetEntryStatus(Function *F);
CallGraph &GetCallGraph();
DxilResourceProperties GetResourceFromVal(Value *resVal);
void EmitGlobalVariableFormatError(GlobalVariable *GV, ValidationRule rule,
ArrayRef<StringRef> args);
// This is the least desirable mechanism, as it has no context.
void EmitError(ValidationRule rule);
void FormatRuleText(std::string &ruleText, ArrayRef<StringRef> args);
void EmitFormatError(ValidationRule rule, ArrayRef<StringRef> args);
void EmitMetaError(Metadata *Meta, ValidationRule rule);
// Use this instead of DxilResourceBase::GetGlobalName
std::string GetResourceName(const hlsl::DxilResourceBase *Res);
void EmitResourceError(const hlsl::DxilResourceBase *Res,
ValidationRule rule);
void EmitResourceFormatError(const hlsl::DxilResourceBase *Res,
ValidationRule rule, ArrayRef<StringRef> args);
bool IsDebugFunctionCall(Instruction *I);
Instruction *GetDebugInstr(Instruction *I);
// Emit an error or a note on instruction `I` with `Msg`.
// If `isError` is true, repeated errors for the same `Rule` may be omitted.
void EmitInstrDiagMsg(Instruction *I, ValidationRule Rule, std::string Msg,
bool isError = true);
void EmitInstrError(Instruction *I, ValidationRule rule);
void EmitInstrNote(Instruction *I, std::string Msg);
void EmitInstrFormatError(Instruction *I, ValidationRule rule,
ArrayRef<StringRef> args);
void EmitSignatureError(DxilSignatureElement *SE, ValidationRule rule);
void EmitTypeError(Type *Ty, ValidationRule rule);
void EmitFnError(Function *F, ValidationRule rule);
void EmitFnFormatError(Function *F, ValidationRule rule,
ArrayRef<StringRef> args);
void EmitFnAttributeError(Function *F, StringRef Kind, StringRef Value);
};
uint32_t ValidateDxilModule(llvm::Module *pModule, llvm::Module *pDebugModule);
} // namespace hlsl
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/DxilValidation/DxilContainerValidation.cpp | ///////////////////////////////////////////////////////////////////////////////
// //
// DxilContainerValidation.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This file provides support for validating DXIL container. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "dxc/Support/FileIOHelper.h"
#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"
#include "dxc/DxilContainer/DxilContainer.h"
#include "dxc/DxilContainer/DxilContainerAssembler.h"
#include "dxc/DxilContainer/DxilPipelineStateValidation.h"
#include "dxc/DxilContainer/DxilRuntimeReflection.h"
#include "dxc/DxilRootSignature/DxilRootSignature.h"
#include "dxc/DxilValidation/DxilValidation.h"
#include "dxc/DXIL/DxilModule.h"
#include "dxc/DXIL/DxilUtil.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include "DxilValidationUtils.h"
#include <memory>
using std::unique_ptr;
using std::unordered_set;
using std::vector;
namespace {
// Utility class for setting and restoring the diagnostic context so we may
// capture errors/warnings
struct DiagRestore {
LLVMContext *Ctx = nullptr;
void *OrigDiagContext;
LLVMContext::DiagnosticHandlerTy OrigHandler;
DiagRestore(llvm::LLVMContext &InputCtx, void *DiagContext) : Ctx(&InputCtx) {
init(DiagContext);
}
DiagRestore(Module *M, void *DiagContext) {
if (!M)
return;
Ctx = &M->getContext();
init(DiagContext);
}
~DiagRestore() {
if (!Ctx)
return;
Ctx->setDiagnosticHandler(OrigHandler, OrigDiagContext);
}
private:
void init(void *DiagContext) {
OrigHandler = Ctx->getDiagnosticHandler();
OrigDiagContext = Ctx->getDiagnosticContext();
Ctx->setDiagnosticHandler(
hlsl::PrintDiagnosticContext::PrintDiagnosticHandler, DiagContext);
}
};
static void emitDxilDiag(LLVMContext &Ctx, const char *str) {
hlsl::dxilutil::EmitErrorOnContext(Ctx, str);
}
} // namespace
namespace hlsl {
// DXIL Container Verification Functions
static void VerifyBlobPartMatches(ValidationContext &ValCtx, LPCSTR pName,
DxilPartWriter *pWriter, const void *pData,
uint32_t Size) {
if (!pData && pWriter->size()) {
// No blob part, but writer says non-zero size is expected.
ValCtx.EmitFormatError(ValidationRule::ContainerPartMissing, {pName});
return;
}
// Compare sizes
if (pWriter->size() != Size) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMatches, {pName});
return;
}
if (Size == 0) {
return;
}
CComPtr<AbstractMemoryStream> pOutputStream;
IFT(CreateMemoryStream(DxcGetThreadMallocNoRef(), &pOutputStream));
pOutputStream->Reserve(Size);
pWriter->write(pOutputStream);
DXASSERT(pOutputStream->GetPtrSize() == Size,
"otherwise, DxilPartWriter misreported size");
if (memcmp(pData, pOutputStream->GetPtr(), Size)) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMatches, {pName});
return;
}
return;
}
static void VerifySignatureMatches(ValidationContext &ValCtx,
DXIL::SignatureKind SigKind,
const void *pSigData, uint32_t SigSize) {
// Generate corresponding signature from module and memcmp
const char *pName = nullptr;
switch (SigKind) {
case hlsl::DXIL::SignatureKind::Input:
pName = "Program Input Signature";
break;
case hlsl::DXIL::SignatureKind::Output:
pName = "Program Output Signature";
break;
case hlsl::DXIL::SignatureKind::PatchConstOrPrim:
if (ValCtx.DxilMod.GetShaderModel()->GetKind() == DXIL::ShaderKind::Mesh)
pName = "Program Primitive Signature";
else
pName = "Program Patch Constant Signature";
break;
default:
break;
}
unique_ptr<DxilPartWriter> pWriter(
NewProgramSignatureWriter(ValCtx.DxilMod, SigKind));
VerifyBlobPartMatches(ValCtx, pName, pWriter.get(), pSigData, SigSize);
}
bool VerifySignatureMatches(llvm::Module *pModule, DXIL::SignatureKind SigKind,
const void *pSigData, uint32_t SigSize) {
ValidationContext ValCtx(*pModule, nullptr, pModule->GetOrCreateDxilModule());
VerifySignatureMatches(ValCtx, SigKind, pSigData, SigSize);
return !ValCtx.Failed;
}
static void VerifyPSVMatches(ValidationContext &ValCtx, const void *pPSVData,
uint32_t PSVSize) {
uint32_t PSVVersion =
MAX_PSV_VERSION; // This should be set to the newest version
unique_ptr<DxilPartWriter> pWriter(NewPSVWriter(ValCtx.DxilMod, PSVVersion));
// Try each version in case an earlier version matches module
while (PSVVersion && pWriter->size() != PSVSize) {
PSVVersion--;
pWriter.reset(NewPSVWriter(ValCtx.DxilMod, PSVVersion));
}
// generate PSV data from module and memcmp
VerifyBlobPartMatches(ValCtx, "Pipeline State Validation", pWriter.get(),
pPSVData, PSVSize);
}
static void VerifyFeatureInfoMatches(ValidationContext &ValCtx,
const void *pFeatureInfoData,
uint32_t FeatureInfoSize) {
// generate Feature Info data from module and memcmp
unique_ptr<DxilPartWriter> pWriter(NewFeatureInfoWriter(ValCtx.DxilMod));
VerifyBlobPartMatches(ValCtx, "Feature Info", pWriter.get(), pFeatureInfoData,
FeatureInfoSize);
}
// Returns true if pBlob is a valid, well-formed CompilerVersion part, false
// otherwise.
bool ValidateCompilerVersionPart(const void *pBlobPtr, UINT blobSize) {
// The hlsl::DxilCompilerVersion struct is always 16 bytes. (2 2-byte
// uint16's, 3 4-byte uint32's) The blob size should absolutely never be less
// than 16 bytes.
if (blobSize < sizeof(hlsl::DxilCompilerVersion)) {
return false;
}
const hlsl::DxilCompilerVersion *pDCV =
(const hlsl::DxilCompilerVersion *)pBlobPtr;
if (pDCV->VersionStringListSizeInBytes == 0) {
// No version strings, just make sure there is no extra space.
return blobSize == sizeof(hlsl::DxilCompilerVersion);
}
// After this point, we know VersionStringListSizeInBytes >= 1, because it
// is an unsigned value and the zero case returned above.
UINT EndOfVersionStringIndex =
sizeof(hlsl::DxilCompilerVersion) + pDCV->VersionStringListSizeInBytes;
// Make sure that the buffer size is large enough to contain both the DCV
// struct and the version string but not any larger than necessary
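// e.g. a 16-byte header plus a 5-byte string list gives 21, which
// PSVALIGN4 rounds up to 24, so blobSize must be exactly 24.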
if (PSVALIGN4(EndOfVersionStringIndex) != blobSize) {
return false;
}
const char *VersionStringsListData =
(const char *)pBlobPtr + sizeof(hlsl::DxilCompilerVersion);
UINT VersionStringListSizeInBytes = pDCV->VersionStringListSizeInBytes;
// now make sure that any pad bytes that were added are null-terminators.
for (UINT i = VersionStringListSizeInBytes;
i < blobSize - sizeof(hlsl::DxilCompilerVersion); i++) {
if (VersionStringsListData[i] != '\0') {
return false;
}
}
// Now, version string validation
// first, the final byte of the string should always be null-terminator so
// that the string ends
if (VersionStringsListData[VersionStringListSizeInBytes - 1] != '\0') {
return false;
}
// construct the first string
// The data format for VersionString can be seen in the definition of the
// DxilCompilerVersion struct. Summary: 2 strings that each end with a null
// terminator, and [0-3] padding null terminators after the final one.
StringRef firstStr(VersionStringsListData);
// if the second string exists, attempt to construct it.
if (VersionStringListSizeInBytes > (firstStr.size() + 1)) {
StringRef secondStr(VersionStringsListData + firstStr.size() + 1);
// the VersionStringListSizeInBytes member should be exactly equal to the
// two string lengths, plus the 2 null terminator bytes.
if (VersionStringListSizeInBytes !=
firstStr.size() + secondStr.size() + 2) {
return false;
}
} else {
// the VersionStringListSizeInBytes member should be exactly equal to the
// first string length, plus the 1 null terminator byte.
if (VersionStringListSizeInBytes != firstStr.size() + 1) {
return false;
}
}
return true;
}
static void VerifyRDATMatches(ValidationContext &ValCtx, const void *pRDATData,
uint32_t RDATSize) {
const char *PartName = "Runtime Data (RDAT)";
RDAT::DxilRuntimeData rdat(pRDATData, RDATSize);
if (!rdat.Validate()) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMatches, {PartName});
return;
}
// If DxilModule subobjects are already loaded, validate them against the
// RDAT blob; otherwise, load the subobjects into DxilModule to generate
// the reference RDAT.
if (!ValCtx.DxilMod.GetSubobjects()) {
auto table = rdat.GetSubobjectTable();
if (table && table.Count() > 0) {
ValCtx.DxilMod.ResetSubobjects(new DxilSubobjects());
if (!LoadSubobjectsFromRDAT(*ValCtx.DxilMod.GetSubobjects(), rdat)) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMatches,
{PartName});
return;
}
}
}
unique_ptr<DxilPartWriter> pWriter(NewRDATWriter(ValCtx.DxilMod));
VerifyBlobPartMatches(ValCtx, PartName, pWriter.get(), pRDATData, RDATSize);
}
bool VerifyRDATMatches(llvm::Module *pModule, const void *pRDATData,
uint32_t RDATSize) {
ValidationContext ValCtx(*pModule, nullptr, pModule->GetOrCreateDxilModule());
VerifyRDATMatches(ValCtx, pRDATData, RDATSize);
return !ValCtx.Failed;
}
bool VerifyFeatureInfoMatches(llvm::Module *pModule,
const void *pFeatureInfoData,
uint32_t FeatureInfoSize) {
ValidationContext ValCtx(*pModule, nullptr, pModule->GetOrCreateDxilModule());
VerifyFeatureInfoMatches(ValCtx, pFeatureInfoData, FeatureInfoSize);
return !ValCtx.Failed;
}
HRESULT ValidateDxilContainerParts(llvm::Module *pModule,
llvm::Module *pDebugModule,
const DxilContainerHeader *pContainer,
uint32_t ContainerSize) {
DXASSERT_NOMSG(pModule);
if (!pContainer || !IsValidDxilContainer(pContainer, ContainerSize)) {
return DXC_E_CONTAINER_INVALID;
}
DxilModule *pDxilModule = DxilModule::TryGetDxilModule(pModule);
if (!pDxilModule) {
return DXC_E_IR_VERIFICATION_FAILED;
}
ValidationContext ValCtx(*pModule, pDebugModule, *pDxilModule);
DXIL::ShaderKind ShaderKind = pDxilModule->GetShaderModel()->GetKind();
bool bTessOrMesh = ShaderKind == DXIL::ShaderKind::Hull ||
ShaderKind == DXIL::ShaderKind::Domain ||
ShaderKind == DXIL::ShaderKind::Mesh;
std::unordered_set<uint32_t> FourCCFound;
const DxilPartHeader *pRootSignaturePart = nullptr;
const DxilPartHeader *pPSVPart = nullptr;
for (auto it = begin(pContainer), itEnd = end(pContainer); it != itEnd;
++it) {
const DxilPartHeader *pPart = *it;
char szFourCC[5];
PartKindToCharArray(pPart->PartFourCC, szFourCC);
if (FourCCFound.find(pPart->PartFourCC) != FourCCFound.end()) {
// Two parts with same FourCC found
ValCtx.EmitFormatError(ValidationRule::ContainerPartRepeated, {szFourCC});
continue;
}
FourCCFound.insert(pPart->PartFourCC);
switch (pPart->PartFourCC) {
case DFCC_InputSignature:
if (ValCtx.isLibProfile) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
} else {
VerifySignatureMatches(ValCtx, DXIL::SignatureKind::Input,
GetDxilPartData(pPart), pPart->PartSize);
}
break;
case DFCC_OutputSignature:
if (ValCtx.isLibProfile) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
} else {
VerifySignatureMatches(ValCtx, DXIL::SignatureKind::Output,
GetDxilPartData(pPart), pPart->PartSize);
}
break;
case DFCC_PatchConstantSignature:
if (ValCtx.isLibProfile) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
} else {
if (bTessOrMesh) {
VerifySignatureMatches(ValCtx, DXIL::SignatureKind::PatchConstOrPrim,
GetDxilPartData(pPart), pPart->PartSize);
} else {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMatches,
{"Program Patch Constant Signature"});
}
}
break;
case DFCC_FeatureInfo:
VerifyFeatureInfoMatches(ValCtx, GetDxilPartData(pPart), pPart->PartSize);
break;
case DFCC_CompilerVersion:
// This blob is either a PDB, or a library profile
if (ValCtx.isLibProfile) {
if (!ValidateCompilerVersionPart((void *)GetDxilPartData(pPart),
pPart->PartSize)) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
}
} else {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
}
break;
case DFCC_RootSignature:
pRootSignaturePart = pPart;
if (ValCtx.isLibProfile) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
}
break;
case DFCC_PipelineStateValidation:
pPSVPart = pPart;
if (ValCtx.isLibProfile) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
} else {
VerifyPSVMatches(ValCtx, GetDxilPartData(pPart), pPart->PartSize);
}
break;
// Skip these
case DFCC_ResourceDef:
case DFCC_ShaderStatistics:
case DFCC_PrivateData:
case DFCC_DXIL:
case DFCC_ShaderDebugInfoDXIL:
case DFCC_ShaderDebugName:
continue;
case DFCC_ShaderHash:
if (pPart->PartSize != sizeof(DxilShaderHash)) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
}
break;
// Runtime Data (RDAT) for libraries
case DFCC_RuntimeData:
if (ValCtx.isLibProfile) {
// TODO: validate without exact binary comparison of serialized data
// - support earlier versions
// - verify no newer record versions than known here (size no larger
// than newest version)
// - verify all data makes sense and matches expectations based on
// module
VerifyRDATMatches(ValCtx, GetDxilPartData(pPart), pPart->PartSize);
} else {
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid,
{szFourCC});
}
break;
case DFCC_Container:
default:
ValCtx.EmitFormatError(ValidationRule::ContainerPartInvalid, {szFourCC});
break;
}
}
// Verify required parts found
if (ValCtx.isLibProfile) {
if (FourCCFound.find(DFCC_RuntimeData) == FourCCFound.end()) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMissing,
{"Runtime Data (RDAT)"});
}
} else {
if (FourCCFound.find(DFCC_InputSignature) == FourCCFound.end()) {
VerifySignatureMatches(ValCtx, DXIL::SignatureKind::Input, nullptr, 0);
}
if (FourCCFound.find(DFCC_OutputSignature) == FourCCFound.end()) {
VerifySignatureMatches(ValCtx, DXIL::SignatureKind::Output, nullptr, 0);
}
if (bTessOrMesh &&
FourCCFound.find(DFCC_PatchConstantSignature) == FourCCFound.end() &&
pDxilModule->GetPatchConstOrPrimSignature().GetElements().size()) {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMissing,
{"Program Patch Constant Signature"});
}
if (FourCCFound.find(DFCC_FeatureInfo) == FourCCFound.end()) {
// Could be optional, but RS1 runtime doesn't handle this case properly.
ValCtx.EmitFormatError(ValidationRule::ContainerPartMissing,
{"Feature Info"});
}
// Validate Root Signature
if (pPSVPart) {
if (pRootSignaturePart) {
std::string diagStr;
raw_string_ostream DiagStream(diagStr);
try {
RootSignatureHandle RS;
RS.LoadSerialized(
(const uint8_t *)GetDxilPartData(pRootSignaturePart),
pRootSignaturePart->PartSize);
RS.Deserialize();
IFTBOOL(VerifyRootSignatureWithShaderPSV(
RS.GetDesc(), pDxilModule->GetShaderModel()->GetKind(),
GetDxilPartData(pPSVPart), pPSVPart->PartSize,
DiagStream),
DXC_E_INCORRECT_ROOT_SIGNATURE);
} catch (...) {
ValCtx.EmitError(ValidationRule::ContainerRootSignatureIncompatible);
emitDxilDiag(pModule->getContext(), DiagStream.str().c_str());
}
}
} else {
ValCtx.EmitFormatError(ValidationRule::ContainerPartMissing,
{"Pipeline State Validation"});
}
}
if (ValCtx.Failed) {
return DXC_E_MALFORMED_CONTAINER;
}
return S_OK;
}
static HRESULT FindDxilPart(const void *pContainerBytes, uint32_t ContainerSize,
DxilFourCC FourCC, const DxilPartHeader **ppPart) {
const DxilContainerHeader *pContainer =
IsDxilContainerLike(pContainerBytes, ContainerSize);
if (!pContainer) {
IFR(DXC_E_CONTAINER_INVALID);
}
if (!IsValidDxilContainer(pContainer, ContainerSize)) {
IFR(DXC_E_CONTAINER_INVALID);
}
DxilPartIterator it =
std::find_if(begin(pContainer), end(pContainer), DxilPartIsType(FourCC));
if (it == end(pContainer)) {
IFR(DXC_E_CONTAINER_MISSING_DXIL);
}
const DxilProgramHeader *pProgramHeader =
reinterpret_cast<const DxilProgramHeader *>(GetDxilPartData(*it));
if (!IsValidDxilProgramHeader(pProgramHeader, (*it)->PartSize)) {
IFR(DXC_E_CONTAINER_INVALID);
}
*ppPart = *it;
return S_OK;
}
HRESULT ValidateLoadModule(const char *pIL, uint32_t ILLength,
unique_ptr<llvm::Module> &pModule, LLVMContext &Ctx,
llvm::raw_ostream &DiagStream, unsigned bLazyLoad) {
llvm::DiagnosticPrinterRawOStream DiagPrinter(DiagStream);
PrintDiagnosticContext DiagContext(DiagPrinter);
DiagRestore DR(Ctx, &DiagContext);
std::unique_ptr<llvm::MemoryBuffer> pBitcodeBuf;
pBitcodeBuf.reset(llvm::MemoryBuffer::getMemBuffer(
llvm::StringRef(pIL, ILLength), "", false)
.release());
ErrorOr<std::unique_ptr<Module>> loadedModuleResult =
bLazyLoad == 0
? llvm::parseBitcodeFile(pBitcodeBuf->getMemBufferRef(), Ctx, nullptr,
true /*Track Bitstream*/)
: llvm::getLazyBitcodeModule(std::move(pBitcodeBuf), Ctx, nullptr,
false, true /*Track Bitstream*/);
// DXIL disallows some LLVM bitcode constructs, like unaccounted-for
// sub-blocks. These appear as warnings, which the validator should reject.
if (DiagContext.HasErrors() || DiagContext.HasWarnings() ||
loadedModuleResult.getError())
return DXC_E_IR_VERIFICATION_FAILED;
pModule = std::move(loadedModuleResult.get());
return S_OK;
}
HRESULT ValidateDxilBitcode(const char *pIL, uint32_t ILLength,
llvm::raw_ostream &DiagStream) {
LLVMContext Ctx;
std::unique_ptr<llvm::Module> pModule;
llvm::DiagnosticPrinterRawOStream DiagPrinter(DiagStream);
PrintDiagnosticContext DiagContext(DiagPrinter);
Ctx.setDiagnosticHandler(PrintDiagnosticContext::PrintDiagnosticHandler,
&DiagContext, true);
HRESULT hr;
if (FAILED(hr = ValidateLoadModule(pIL, ILLength, pModule, Ctx, DiagStream,
/*bLazyLoad*/ false)))
return hr;
if (FAILED(hr = ValidateDxilModule(pModule.get(), nullptr)))
return hr;
DxilModule &dxilModule = pModule->GetDxilModule();
auto &SerializedRootSig = dxilModule.GetSerializedRootSignature();
if (!SerializedRootSig.empty()) {
unique_ptr<DxilPartWriter> pWriter(NewPSVWriter(dxilModule));
DXASSERT_NOMSG(pWriter->size());
CComPtr<AbstractMemoryStream> pOutputStream;
IFT(CreateMemoryStream(DxcGetThreadMallocNoRef(), &pOutputStream));
pOutputStream->Reserve(pWriter->size());
pWriter->write(pOutputStream);
DxilVersionedRootSignature desc;
try {
DeserializeRootSignature(SerializedRootSig.data(),
SerializedRootSig.size(), desc.get_address_of());
if (!desc.get()) {
return DXC_E_INCORRECT_ROOT_SIGNATURE;
}
IFTBOOL(VerifyRootSignatureWithShaderPSV(
desc.get(), dxilModule.GetShaderModel()->GetKind(),
pOutputStream->GetPtr(), pWriter->size(), DiagStream),
DXC_E_INCORRECT_ROOT_SIGNATURE);
} catch (...) {
return DXC_E_INCORRECT_ROOT_SIGNATURE;
}
}
if (DiagContext.HasErrors() || DiagContext.HasWarnings()) {
return DXC_E_IR_VERIFICATION_FAILED;
}
return S_OK;
}
static HRESULT ValidateLoadModuleFromContainer(
const void *pContainer, uint32_t ContainerSize,
std::unique_ptr<llvm::Module> &pModule,
std::unique_ptr<llvm::Module> &pDebugModule, llvm::LLVMContext &Ctx,
LLVMContext &DbgCtx, llvm::raw_ostream &DiagStream, unsigned bLazyLoad) {
llvm::DiagnosticPrinterRawOStream DiagPrinter(DiagStream);
PrintDiagnosticContext DiagContext(DiagPrinter);
DiagRestore DR(Ctx, &DiagContext);
DiagRestore DR2(DbgCtx, &DiagContext);
const DxilPartHeader *pPart = nullptr;
IFR(FindDxilPart(pContainer, ContainerSize, DFCC_DXIL, &pPart));
const char *pIL = nullptr;
uint32_t ILLength = 0;
GetDxilProgramBitcode(
reinterpret_cast<const DxilProgramHeader *>(GetDxilPartData(pPart)), &pIL,
&ILLength);
IFR(ValidateLoadModule(pIL, ILLength, pModule, Ctx, DiagStream, bLazyLoad));
HRESULT hr;
const DxilPartHeader *pDbgPart = nullptr;
if (FAILED(hr = FindDxilPart(pContainer, ContainerSize,
DFCC_ShaderDebugInfoDXIL, &pDbgPart)) &&
hr != DXC_E_CONTAINER_MISSING_DXIL) {
return hr;
}
if (pDbgPart) {
GetDxilProgramBitcode(
reinterpret_cast<const DxilProgramHeader *>(GetDxilPartData(pDbgPart)),
&pIL, &ILLength);
if (FAILED(hr = ValidateLoadModule(pIL, ILLength, pDebugModule, DbgCtx,
DiagStream, bLazyLoad))) {
return hr;
}
}
return S_OK;
}
HRESULT ValidateLoadModuleFromContainer(
const void *pContainer, uint32_t ContainerSize,
std::unique_ptr<llvm::Module> &pModule,
std::unique_ptr<llvm::Module> &pDebugModule, llvm::LLVMContext &Ctx,
llvm::LLVMContext &DbgCtx, llvm::raw_ostream &DiagStream) {
return ValidateLoadModuleFromContainer(pContainer, ContainerSize, pModule,
pDebugModule, Ctx, DbgCtx, DiagStream,
/*bLazyLoad*/ false);
}
// Lazy loads module from container, validating load, but not module.
HRESULT ValidateLoadModuleFromContainerLazy(
const void *pContainer, uint32_t ContainerSize,
std::unique_ptr<llvm::Module> &pModule,
std::unique_ptr<llvm::Module> &pDebugModule, llvm::LLVMContext &Ctx,
llvm::LLVMContext &DbgCtx, llvm::raw_ostream &DiagStream) {
return ValidateLoadModuleFromContainer(pContainer, ContainerSize, pModule,
pDebugModule, Ctx, DbgCtx, DiagStream,
/*bLazyLoad*/ true);
}
HRESULT ValidateDxilContainer(const void *pContainer, uint32_t ContainerSize,
llvm::Module *pDebugModule,
llvm::raw_ostream &DiagStream) {
LLVMContext Ctx, DbgCtx;
std::unique_ptr<llvm::Module> pModule, pDebugModuleInContainer;
llvm::DiagnosticPrinterRawOStream DiagPrinter(DiagStream);
PrintDiagnosticContext DiagContext(DiagPrinter);
Ctx.setDiagnosticHandler(PrintDiagnosticContext::PrintDiagnosticHandler,
&DiagContext, true);
DbgCtx.setDiagnosticHandler(PrintDiagnosticContext::PrintDiagnosticHandler,
&DiagContext, true);
DiagRestore DR(pDebugModule, &DiagContext);
IFR(ValidateLoadModuleFromContainer(pContainer, ContainerSize, pModule,
pDebugModuleInContainer, Ctx, DbgCtx,
DiagStream));
if (pDebugModuleInContainer)
pDebugModule = pDebugModuleInContainer.get();
// Validate DXIL Module
IFR(ValidateDxilModule(pModule.get(), pDebugModule));
if (DiagContext.HasErrors() || DiagContext.HasWarnings()) {
return DXC_E_IR_VERIFICATION_FAILED;
}
return ValidateDxilContainerParts(
pModule.get(), pDebugModule,
IsDxilContainerLike(pContainer, ContainerSize), ContainerSize);
}
HRESULT ValidateDxilContainer(const void *pContainer, uint32_t ContainerSize,
llvm::raw_ostream &DiagStream) {
return ValidateDxilContainer(pContainer, ContainerSize, nullptr, DiagStream);
}
} // namespace hlsl
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/DxilValidation/CMakeLists.txt | # Copyright (C) Microsoft Corporation. All rights reserved.
# This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details.
add_hlsl_hctgen(DxilValidationInc OUTPUT DxilValidation.inc BUILD_DIR)
add_hlsl_hctgen(DxilValidation OUTPUT DxilValidationImpl.inc BUILD_DIR)
add_llvm_library(LLVMDxilValidation
DxilContainerValidation.cpp
DxilValidation.cpp
DxilValidationUtils.cpp
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/IR
)
add_dependencies(LLVMDxilValidation intrinsics_gen)
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/DxilValidation/DxilValidationUtils.cpp | ///////////////////////////////////////////////////////////////////////////////
// //
// DxilValidationUtils.cpp                                                  //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This file provides utils for validating DXIL. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "DxilValidationUtils.h"
#include "dxc/DXIL/DxilEntryProps.h"
#include "dxc/DXIL/DxilInstructions.h"
#include "dxc/DXIL/DxilModule.h"
#include "dxc/DXIL/DxilOperations.h"
#include "dxc/DXIL/DxilUtil.h"
#include "dxc/Support/Global.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"
namespace hlsl {
EntryStatus::EntryStatus(DxilEntryProps &entryProps)
: m_bCoverageIn(false), m_bInnerCoverageIn(false), hasViewID(false) {
for (unsigned i = 0; i < DXIL::kNumOutputStreams; i++) {
hasOutputPosition[i] = false;
OutputPositionMask[i] = 0;
}
outputCols.resize(entryProps.sig.OutputSignature.GetElements().size(), 0);
patchConstOrPrimCols.resize(
entryProps.sig.PatchConstOrPrimSignature.GetElements().size(), 0);
}
ValidationContext::ValidationContext(Module &llvmModule, Module *DebugModule,
DxilModule &dxilModule)
: M(llvmModule), pDebugModule(DebugModule), DxilMod(dxilModule),
DL(llvmModule.getDataLayout()), LastRuleEmit((ValidationRule)-1),
kDxilControlFlowHintMDKind(llvmModule.getContext().getMDKindID(
DxilMDHelper::kDxilControlFlowHintMDName)),
kDxilPreciseMDKind(llvmModule.getContext().getMDKindID(
DxilMDHelper::kDxilPreciseAttributeMDName)),
kDxilNonUniformMDKind(llvmModule.getContext().getMDKindID(
DxilMDHelper::kDxilNonUniformAttributeMDName)),
kLLVMLoopMDKind(llvmModule.getContext().getMDKindID("llvm.loop")),
slotTracker(&llvmModule, true) {
DxilMod.GetDxilVersion(m_DxilMajor, m_DxilMinor);
HandleTy = DxilMod.GetOP()->GetHandleType();
for (Function &F : llvmModule.functions()) {
if (DxilMod.HasDxilEntryProps(&F)) {
DxilEntryProps &entryProps = DxilMod.GetDxilEntryProps(&F);
entryStatusMap[&F] = llvm::make_unique<EntryStatus>(entryProps);
}
}
isLibProfile = dxilModule.GetShaderModel()->IsLib();
BuildResMap();
// Collect patch constant map.
if (isLibProfile) {
for (Function &F : dxilModule.GetModule()->functions()) {
if (dxilModule.HasDxilEntryProps(&F)) {
DxilEntryProps &entryProps = dxilModule.GetDxilEntryProps(&F);
DxilFunctionProps &props = entryProps.props;
if (props.IsHS()) {
PatchConstantFuncMap[props.ShaderProps.HS.patchConstantFunc]
.emplace_back(&F);
}
}
}
} else {
Function *Entry = dxilModule.GetEntryFunction();
if (!dxilModule.HasDxilEntryProps(Entry)) {
// must have props.
EmitFnError(Entry, ValidationRule::MetaNoEntryPropsForEntry);
return;
}
DxilEntryProps &entryProps = dxilModule.GetDxilEntryProps(Entry);
DxilFunctionProps &props = entryProps.props;
if (props.IsHS()) {
PatchConstantFuncMap[props.ShaderProps.HS.patchConstantFunc].emplace_back(
Entry);
}
}
}
void ValidationContext::PropagateResMap(Value *V, DxilResourceBase *Res) {
auto it = ResPropMap.find(V);
if (it != ResPropMap.end()) {
DxilResourceProperties RP = resource_helper::loadPropsFromResourceBase(Res);
DxilResourceProperties itRP = it->second;
if (itRP != RP) {
EmitResourceError(Res, ValidationRule::InstrResourceMapToSingleEntry);
}
} else {
DxilResourceProperties RP = resource_helper::loadPropsFromResourceBase(Res);
ResPropMap[V] = RP;
for (User *U : V->users()) {
if (isa<GEPOperator>(U)) {
PropagateResMap(U, Res);
} else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Stop propagate on function call.
DxilInst_CreateHandleForLib hdl(CI);
if (hdl) {
DxilResourceProperties RP =
resource_helper::loadPropsFromResourceBase(Res);
ResPropMap[CI] = RP;
}
} else if (isa<LoadInst>(U)) {
PropagateResMap(U, Res);
} else if (isa<BitCastOperator>(U) && U->user_empty()) {
// For hlsl type.
continue;
} else {
EmitResourceError(Res, ValidationRule::InstrResourceUser);
}
}
}
}
void ValidationContext::BuildResMap() {
hlsl::OP *hlslOP = DxilMod.GetOP();
if (isLibProfile) {
std::unordered_set<Value *> ResSet;
// Start from all global variable in resTab.
for (auto &Res : DxilMod.GetCBuffers())
PropagateResMap(Res->GetGlobalSymbol(), Res.get());
for (auto &Res : DxilMod.GetUAVs())
PropagateResMap(Res->GetGlobalSymbol(), Res.get());
for (auto &Res : DxilMod.GetSRVs())
PropagateResMap(Res->GetGlobalSymbol(), Res.get());
for (auto &Res : DxilMod.GetSamplers())
PropagateResMap(Res->GetGlobalSymbol(), Res.get());
} else {
// Scan all createHandle.
for (auto &it : hlslOP->GetOpFuncList(DXIL::OpCode::CreateHandle)) {
Function *F = it.second;
if (!F)
continue;
for (User *U : F->users()) {
CallInst *CI = cast<CallInst>(U);
DxilInst_CreateHandle hdl(CI);
// Validate Class/RangeID/Index.
Value *resClass = hdl.get_resourceClass();
if (!isa<ConstantInt>(resClass)) {
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
Value *rangeIndex = hdl.get_rangeId();
if (!isa<ConstantInt>(rangeIndex)) {
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
DxilResourceBase *Res = nullptr;
unsigned rangeId = hdl.get_rangeId_val();
switch (static_cast<DXIL::ResourceClass>(hdl.get_resourceClass_val())) {
default:
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
case DXIL::ResourceClass::CBuffer:
if (DxilMod.GetCBuffers().size() > rangeId) {
Res = &DxilMod.GetCBuffer(rangeId);
} else {
// Emit Error.
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
break;
case DXIL::ResourceClass::Sampler:
if (DxilMod.GetSamplers().size() > rangeId) {
Res = &DxilMod.GetSampler(rangeId);
} else {
// Emit Error.
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
break;
case DXIL::ResourceClass::SRV:
if (DxilMod.GetSRVs().size() > rangeId) {
Res = &DxilMod.GetSRV(rangeId);
} else {
// Emit Error.
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
break;
case DXIL::ResourceClass::UAV:
if (DxilMod.GetUAVs().size() > rangeId) {
Res = &DxilMod.GetUAV(rangeId);
} else {
// Emit Error.
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
break;
}
ConstantInt *cIndex = dyn_cast<ConstantInt>(hdl.get_index());
if (!Res->GetHLSLType()->getPointerElementType()->isArrayTy()) {
if (!cIndex) {
// Index must be 0 for a non-array resource.
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
}
if (cIndex) {
unsigned index = cIndex->getLimitedValue();
if (index < Res->GetLowerBound() || index > Res->GetUpperBound()) {
// index out of range.
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
}
HandleResIndexMap[CI] = rangeId;
DxilResourceProperties RP =
resource_helper::loadPropsFromResourceBase(Res);
ResPropMap[CI] = RP;
}
}
}
const ShaderModel &SM = *DxilMod.GetShaderModel();
for (auto &it : hlslOP->GetOpFuncList(DXIL::OpCode::AnnotateHandle)) {
Function *F = it.second;
if (!F)
continue;
for (User *U : F->users()) {
CallInst *CI = cast<CallInst>(U);
DxilInst_AnnotateHandle hdl(CI);
DxilResourceProperties RP =
resource_helper::loadPropsFromAnnotateHandle(hdl, SM);
if (RP.getResourceKind() == DXIL::ResourceKind::Invalid) {
EmitInstrError(CI, ValidationRule::InstrOpConstRange);
continue;
}
ResPropMap[CI] = RP;
}
}
}
bool ValidationContext::HasEntryStatus(Function *F) {
return entryStatusMap.find(F) != entryStatusMap.end();
}
EntryStatus &ValidationContext::GetEntryStatus(Function *F) {
return *entryStatusMap[F];
}
CallGraph &ValidationContext::GetCallGraph() {
if (!pCallGraph)
pCallGraph = llvm::make_unique<CallGraph>(M);
return *pCallGraph.get();
}
void ValidationContext::EmitGlobalVariableFormatError(
GlobalVariable *GV, ValidationRule rule, ArrayRef<StringRef> args) {
std::string ruleText = GetValidationRuleText(rule);
FormatRuleText(ruleText, args);
if (pDebugModule)
GV = pDebugModule->getGlobalVariable(GV->getName());
dxilutil::EmitErrorOnGlobalVariable(M.getContext(), GV, ruleText);
Failed = true;
}
// This is the least desirable mechanism, as it has no context.
void ValidationContext::EmitError(ValidationRule rule) {
dxilutil::EmitErrorOnContext(M.getContext(), GetValidationRuleText(rule));
Failed = true;
}
void ValidationContext::FormatRuleText(std::string &ruleText,
ArrayRef<StringRef> args) {
std::string escapedArg;
for (unsigned i = 0; i < args.size(); i++) {
std::string argIdx = "%" + std::to_string(i);
StringRef pArg = args[i];
if (pArg == "")
pArg = "<null>";
if (pArg[0] == 1) {
escapedArg = "";
raw_string_ostream os(escapedArg);
dxilutil::PrintEscapedString(pArg, os);
os.flush();
pArg = escapedArg;
}
std::string::size_type offset = ruleText.find(argIdx);
if (offset == std::string::npos)
continue;
unsigned size = argIdx.size();
ruleText.replace(offset, size, pArg);
}
}
void ValidationContext::EmitFormatError(ValidationRule rule,
ArrayRef<StringRef> args) {
std::string ruleText = GetValidationRuleText(rule);
FormatRuleText(ruleText, args);
dxilutil::EmitErrorOnContext(M.getContext(), ruleText);
Failed = true;
}
void ValidationContext::EmitMetaError(Metadata *Meta, ValidationRule rule) {
std::string O;
raw_string_ostream OSS(O);
Meta->print(OSS, &M);
dxilutil::EmitErrorOnContext(M.getContext(), GetValidationRuleText(rule) + O);
Failed = true;
}
// Use this instead of DxilResourceBase::GetGlobalName
std::string
ValidationContext::GetResourceName(const hlsl::DxilResourceBase *Res) {
if (!Res)
return "nullptr";
std::string resName = Res->GetGlobalName();
if (!resName.empty())
return resName;
if (pDebugModule) {
DxilModule &DM = pDebugModule->GetOrCreateDxilModule();
switch (Res->GetClass()) {
case DXIL::ResourceClass::CBuffer:
return DM.GetCBuffer(Res->GetID()).GetGlobalName();
case DXIL::ResourceClass::Sampler:
return DM.GetSampler(Res->GetID()).GetGlobalName();
case DXIL::ResourceClass::SRV:
return DM.GetSRV(Res->GetID()).GetGlobalName();
case DXIL::ResourceClass::UAV:
return DM.GetUAV(Res->GetID()).GetGlobalName();
default:
return "Invalid Resource";
}
}
  // When names have been stripped, use class and binding location to
  // identify the resource. Format is roughly:
  //   Allocated:   (CB|T|U|S)<ID>: <ResourceKind>
  //                ((cb|t|u|s)<LB>[<RangeSize>] space<SpaceID>)
  //   Unallocated: (CB|T|U|S)<ID>: <ResourceKind> (no bind location)
  //   Example:     U0: TypedBuffer (u5[2] space1)
  // [<RangeSize>] and space<SpaceID> are skipped if 1 and 0 respectively.
return (Twine(Res->GetResIDPrefix()) + Twine(Res->GetID()) + ": " +
Twine(Res->GetResKindName()) +
(Res->IsAllocated() ? (" (" + Twine(Res->GetResBindPrefix()) +
Twine(Res->GetLowerBound()) +
(Res->IsUnbounded() ? Twine("[unbounded]")
: (Res->GetRangeSize() != 1)
? "[" + Twine(Res->GetRangeSize()) + "]"
: Twine()) +
((Res->GetSpaceID() != 0)
? " space" + Twine(Res->GetSpaceID())
: Twine()) +
")")
: Twine(" (no bind location)")))
.str();
}
void ValidationContext::EmitResourceError(const hlsl::DxilResourceBase *Res,
ValidationRule rule) {
std::string QuotedRes = " '" + GetResourceName(Res) + "'";
dxilutil::EmitErrorOnContext(M.getContext(),
GetValidationRuleText(rule) + QuotedRes);
Failed = true;
}
void ValidationContext::EmitResourceFormatError(
const hlsl::DxilResourceBase *Res, ValidationRule rule,
ArrayRef<StringRef> args) {
std::string QuotedRes = " '" + GetResourceName(Res) + "'";
std::string ruleText = GetValidationRuleText(rule);
FormatRuleText(ruleText, args);
dxilutil::EmitErrorOnContext(M.getContext(), ruleText + QuotedRes);
Failed = true;
}
bool ValidationContext::IsDebugFunctionCall(Instruction *I) {
return isa<DbgInfoIntrinsic>(I);
}
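// Map an instruction in the module being validated to its counterpart in the
// debug module (when present) so diagnostics carry source locations.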
Instruction *ValidationContext::GetDebugInstr(Instruction *I) {
DXASSERT_NOMSG(I);
if (pDebugModule) {
// Look up the matching instruction in the debug module.
llvm::Function *Fn = I->getParent()->getParent();
llvm::Function *DbgFn = pDebugModule->getFunction(Fn->getName());
if (DbgFn) {
// Linear lookup, but then again, failing validation is rare.
inst_iterator it = inst_begin(Fn);
inst_iterator dbg_it = inst_begin(DbgFn);
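      // Walk both functions in lockstep; the debug module contains extra
      // llvm.dbg.* intrinsics that must be skipped to stay aligned.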
while (IsDebugFunctionCall(&*dbg_it))
++dbg_it;
while (&*it != I) {
++it;
++dbg_it;
while (IsDebugFunctionCall(&*dbg_it))
++dbg_it;
}
return &*dbg_it;
}
}
return I;
}
// Emit an error or note on instruction `I` with `Msg`.
// When `isError` is true, repeated errors for the same rule at the same
// debug location are suppressed.
void ValidationContext::EmitInstrDiagMsg(Instruction *I, ValidationRule Rule,
std::string Msg, bool isError) {
BasicBlock *BB = I->getParent();
Function *F = BB->getParent();
Instruction *DbgI = GetDebugInstr(I);
if (isError) {
if (const DebugLoc L = DbgI->getDebugLoc()) {
// Instructions that get scalarized will likely hit
// this case. Avoid redundant diagnostic messages.
if (Rule == LastRuleEmit && L == LastDebugLocEmit) {
return;
}
LastRuleEmit = Rule;
LastDebugLocEmit = L;
}
dxilutil::EmitErrorOnInstruction(DbgI, Msg);
} else {
dxilutil::EmitNoteOnContext(DbgI->getContext(), Msg);
}
  // Emit a note identifying the LLVM instruction, its block, and its function.
std::string InstrStr;
raw_string_ostream InstrStream(InstrStr);
I->print(InstrStream, slotTracker);
InstrStream.flush();
StringRef InstrStrRef = InstrStr;
InstrStrRef = InstrStrRef.ltrim(); // Ignore indentation
Msg = "at '" + InstrStrRef.str() + "'";
// Print the parent block name
Msg += " in block '";
if (!BB->getName().empty()) {
Msg += BB->getName();
} else {
unsigned idx = 0;
for (auto i = F->getBasicBlockList().begin(),
e = F->getBasicBlockList().end();
i != e; ++i) {
if (BB == &(*i)) {
break;
}
idx++;
}
Msg += "#" + std::to_string(idx);
}
Msg += "'";
// Print the function name
Msg += " of function '" + F->getName().str() + "'.";
dxilutil::EmitNoteOnContext(DbgI->getContext(), Msg);
Failed = true;
}
void ValidationContext::EmitInstrError(Instruction *I, ValidationRule rule) {
EmitInstrDiagMsg(I, rule, GetValidationRuleText(rule));
}
void ValidationContext::EmitInstrNote(Instruction *I, std::string Msg) {
EmitInstrDiagMsg(I, LastRuleEmit, Msg, false);
}
void ValidationContext::EmitInstrFormatError(Instruction *I,
ValidationRule rule,
ArrayRef<StringRef> args) {
std::string ruleText = GetValidationRuleText(rule);
FormatRuleText(ruleText, args);
EmitInstrDiagMsg(I, rule, ruleText);
}
void ValidationContext::EmitSignatureError(DxilSignatureElement *SE,
ValidationRule rule) {
EmitFormatError(rule, {SE->GetName()});
}
void ValidationContext::EmitTypeError(Type *Ty, ValidationRule rule) {
std::string O;
raw_string_ostream OSS(O);
Ty->print(OSS);
EmitFormatError(rule, {OSS.str()});
}
void ValidationContext::EmitFnError(Function *F, ValidationRule rule) {
if (pDebugModule)
if (Function *dbgF = pDebugModule->getFunction(F->getName()))
F = dbgF;
dxilutil::EmitErrorOnFunction(M.getContext(), F, GetValidationRuleText(rule));
Failed = true;
}
void ValidationContext::EmitFnFormatError(Function *F, ValidationRule rule,
ArrayRef<StringRef> args) {
std::string ruleText = GetValidationRuleText(rule);
FormatRuleText(ruleText, args);
if (pDebugModule)
if (Function *dbgF = pDebugModule->getFunction(F->getName()))
F = dbgF;
dxilutil::EmitErrorOnFunction(M.getContext(), F, ruleText);
Failed = true;
}
void ValidationContext::EmitFnAttributeError(Function *F, StringRef Kind,
StringRef Value) {
EmitFnFormatError(F, ValidationRule::DeclFnAttribute,
{F->getName(), Kind, Value});
}
} // namespace hlsl
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/DxilValidation/DxilValidation.cpp | ///////////////////////////////////////////////////////////////////////////////
// //
// DxilValidation.cpp //
// Copyright (C) Microsoft Corporation. All rights reserved. //
// This file is distributed under the University of Illinois Open Source //
// License. See LICENSE.TXT for details. //
// //
// This file provides support for validating DXIL shaders. //
// //
///////////////////////////////////////////////////////////////////////////////
#include "dxc/Support/Global.h"
#include "dxc/Support/WinIncludes.h"
#include "dxc/DXIL/DxilConstants.h"
#include "dxc/DXIL/DxilEntryProps.h"
#include "dxc/DXIL/DxilFunctionProps.h"
#include "dxc/DXIL/DxilInstructions.h"
#include "dxc/DXIL/DxilModule.h"
#include "dxc/DXIL/DxilOperations.h"
#include "dxc/DXIL/DxilResourceProperties.h"
#include "dxc/DXIL/DxilShaderModel.h"
#include "dxc/DXIL/DxilUtil.h"
#include "dxc/DxilValidation/DxilValidation.h"
#include "dxc/HLSL/DxilGenerationPass.h"
#include "llvm/Analysis/ReducibilityAnalysis.h"
#include "dxc/HLSL/DxilPackSignatureElement.h"
#include "dxc/HLSL/DxilSignatureAllocator.h"
#include "dxc/HLSL/DxilSpanAllocator.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"
#include "DxilValidationUtils.h"
#include <algorithm>
#include <deque>
#include <unordered_set>
using namespace llvm;
using std::unique_ptr;
using std::unordered_set;
using std::vector;
///////////////////////////////////////////////////////////////////////////////
// Error messages.
#include "DxilValidationImpl.inc"
namespace hlsl {
// PrintDiagnosticContext methods.
PrintDiagnosticContext::PrintDiagnosticContext(DiagnosticPrinter &printer)
: m_Printer(printer), m_errorsFound(false), m_warningsFound(false) {}
bool PrintDiagnosticContext::HasErrors() const { return m_errorsFound; }
bool PrintDiagnosticContext::HasWarnings() const { return m_warningsFound; }
void PrintDiagnosticContext::Handle(const DiagnosticInfo &DI) {
DI.print(m_Printer);
switch (DI.getSeverity()) {
case llvm::DiagnosticSeverity::DS_Error:
m_errorsFound = true;
break;
case llvm::DiagnosticSeverity::DS_Warning:
m_warningsFound = true;
break;
default:
break;
}
m_Printer << "\n";
}
void PrintDiagnosticContext::PrintDiagnosticHandler(const DiagnosticInfo &DI,
void *Context) {
reinterpret_cast<hlsl::PrintDiagnosticContext *>(Context)->Handle(DI);
}
struct PSExecutionInfo {
bool SuperSampling = false;
DXIL::SemanticKind OutputDepthKind = DXIL::SemanticKind::Invalid;
const InterpolationMode *PositionInterpolationMode = nullptr;
};
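// Validate the row/col operands of a signature access and record which output
// components are written. Returns the constant column index, or 0 when the
// column operand is not a constant.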
static unsigned ValidateSignatureRowCol(Instruction *I,
DxilSignatureElement &SE, Value *rowVal,
Value *colVal, EntryStatus &Status,
ValidationContext &ValCtx) {
if (ConstantInt *constRow = dyn_cast<ConstantInt>(rowVal)) {
unsigned row = constRow->getLimitedValue();
if (row >= SE.GetRows()) {
std::string range = std::string("0~") + std::to_string(SE.GetRows());
ValCtx.EmitInstrFormatError(I, ValidationRule::InstrOperandRange,
{"Row", range, std::to_string(row)});
}
}
if (!isa<ConstantInt>(colVal)) {
// col must be const
ValCtx.EmitInstrFormatError(I, ValidationRule::InstrOpConst,
{"Col", "LoadInput/StoreOutput"});
return 0;
}
unsigned col = cast<ConstantInt>(colVal)->getLimitedValue();
if (col > SE.GetCols()) {
std::string range = std::string("0~") + std::to_string(SE.GetCols());
ValCtx.EmitInstrFormatError(I, ValidationRule::InstrOperandRange,
{"Col", range, std::to_string(col)});
} else {
if (SE.IsOutput())
Status.outputCols[SE.GetID()] |= 1 << col;
if (SE.IsPatchConstOrPrim())
Status.patchConstOrPrimCols[SE.GetID()] |= 1 << col;
}
return col;
}
static DxilSignatureElement *
ValidateSignatureAccess(Instruction *I, DxilSignature &sig, Value *sigID,
Value *rowVal, Value *colVal, EntryStatus &Status,
ValidationContext &ValCtx) {
if (!isa<ConstantInt>(sigID)) {
// inputID must be const
ValCtx.EmitInstrFormatError(I, ValidationRule::InstrOpConst,
{"SignatureID", "LoadInput/StoreOutput"});
return nullptr;
}
unsigned SEIdx = cast<ConstantInt>(sigID)->getLimitedValue();
if (sig.GetElements().size() <= SEIdx) {
ValCtx.EmitInstrError(I, ValidationRule::InstrOpConstRange);
return nullptr;
}
DxilSignatureElement &SE = sig.GetElement(SEIdx);
bool isOutput = sig.IsOutput();
unsigned col = ValidateSignatureRowCol(I, SE, rowVal, colVal, Status, ValCtx);
  if (isOutput &&
      SE.GetSemantic()->GetKind() == DXIL::SemanticKind::Position &&
      SE.GetOutputStream() < DXIL::kNumOutputStreams) {
    // Check the stream index before reading or writing the mask array.
    Status.OutputPositionMask[SE.GetOutputStream()] |= 1 << col;
  }
return &SE;
}
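// Resolve a handle value to its resource properties, emitting an error and
// returning invalid properties when the value is not a valid handle.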
static DxilResourceProperties GetResourceFromHandle(Value *Handle,
ValidationContext &ValCtx) {
if (!isa<CallInst>(Handle)) {
if (Instruction *I = dyn_cast<Instruction>(Handle))
ValCtx.EmitInstrError(I, ValidationRule::InstrHandleNotFromCreateHandle);
else
ValCtx.EmitError(ValidationRule::InstrHandleNotFromCreateHandle);
DxilResourceProperties RP;
return RP;
}
DxilResourceProperties RP = ValCtx.GetResourceFromVal(Handle);
if (RP.getResourceClass() == DXIL::ResourceClass::Invalid) {
ValCtx.EmitInstrError(cast<CallInst>(Handle),
ValidationRule::InstrHandleNotFromCreateHandle);
}
return RP;
}
static DXIL::SamplerKind GetSamplerKind(Value *samplerHandle,
ValidationContext &ValCtx) {
DxilResourceProperties RP = GetResourceFromHandle(samplerHandle, ValCtx);
if (RP.getResourceClass() != DXIL::ResourceClass::Sampler) {
// must be sampler.
return DXIL::SamplerKind::Invalid;
}
if (RP.Basic.SamplerCmpOrHasCounter)
return DXIL::SamplerKind::Comparison;
else if (RP.getResourceKind() == DXIL::ResourceKind::Invalid)
return DXIL::SamplerKind::Invalid;
else
return DXIL::SamplerKind::Default;
}
static DXIL::ResourceKind
GetResourceKindAndCompTy(Value *handle, DXIL::ComponentType &CompTy,
DXIL::ResourceClass &ResClass,
ValidationContext &ValCtx) {
CompTy = DXIL::ComponentType::Invalid;
ResClass = DXIL::ResourceClass::Invalid;
// TODO: validate ROV is used only in PS.
DxilResourceProperties RP = GetResourceFromHandle(handle, ValCtx);
ResClass = RP.getResourceClass();
switch (ResClass) {
case DXIL::ResourceClass::SRV:
case DXIL::ResourceClass::UAV:
break;
case DXIL::ResourceClass::CBuffer:
return DXIL::ResourceKind::CBuffer;
case DXIL::ResourceClass::Sampler:
return DXIL::ResourceKind::Sampler;
default:
// Emit invalid res class
return DXIL::ResourceKind::Invalid;
}
if (!DXIL::IsStructuredBuffer(RP.getResourceKind()))
CompTy = static_cast<DXIL::ComponentType>(RP.Typed.CompType);
else
CompTy = DXIL::ComponentType::Invalid;
return RP.getResourceKind();
}
DxilFieldAnnotation *GetFieldAnnotation(Type *Ty, DxilTypeSystem &typeSys,
std::deque<unsigned> &offsets) {
unsigned CurIdx = 1;
unsigned LastIdx = offsets.size() - 1;
DxilStructAnnotation *StructAnnot = nullptr;
for (; CurIdx < offsets.size(); ++CurIdx) {
if (const StructType *EltST = dyn_cast<StructType>(Ty)) {
if (DxilStructAnnotation *EltAnnot = typeSys.GetStructAnnotation(EltST)) {
StructAnnot = EltAnnot;
Ty = EltST->getElementType(offsets[CurIdx]);
if (CurIdx == LastIdx) {
return &StructAnnot->GetFieldAnnotation(offsets[CurIdx]);
}
} else {
return nullptr;
}
} else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
Ty = AT->getElementType();
StructAnnot = nullptr;
} else {
if (StructAnnot)
return &StructAnnot->GetFieldAnnotation(offsets[CurIdx]);
}
}
return nullptr;
}
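// Return the properties recorded for a resource value, or default (invalid)
// properties when the value was never mapped to a resource.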
DxilResourceProperties ValidationContext::GetResourceFromVal(Value *resVal) {
auto it = ResPropMap.find(resVal);
if (it != ResPropMap.end()) {
return it->second;
} else {
DxilResourceProperties RP;
return RP;
}
}
struct ResRetUsage {
bool x;
bool y;
bool z;
bool w;
bool status;
ResRetUsage() : x(false), y(false), z(false), w(false), status(false) {}
};
static void CollectGetDimResRetUsage(ResRetUsage &usage, Instruction *ResRet,
ValidationContext &ValCtx) {
for (User *U : ResRet->users()) {
if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U)) {
for (unsigned idx : EVI->getIndices()) {
switch (idx) {
case 0:
usage.x = true;
break;
case 1:
usage.y = true;
break;
case 2:
usage.z = true;
break;
case 3:
usage.w = true;
break;
case DXIL::kResRetStatusIndex:
usage.status = true;
break;
default:
// Emit index out of bound.
ValCtx.EmitInstrError(EVI,
ValidationRule::InstrDxilStructUserOutOfBound);
break;
}
}
} else if (PHINode *PHI = dyn_cast<PHINode>(U)) {
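      // Usage may flow through phi nodes; recurse into the phi's users.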
CollectGetDimResRetUsage(usage, PHI, ValCtx);
} else {
Instruction *User = cast<Instruction>(U);
ValCtx.EmitInstrError(User, ValidationRule::InstrDxilStructUser);
}
}
}
static void ValidateResourceCoord(CallInst *CI, DXIL::ResourceKind resKind,
ArrayRef<Value *> coords,
ValidationContext &ValCtx) {
const unsigned kMaxNumCoords = 4;
unsigned numCoords = DxilResource::GetNumCoords(resKind);
for (unsigned i = 0; i < kMaxNumCoords; i++) {
if (i < numCoords) {
if (isa<UndefValue>(coords[i])) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceCoordinateMiss);
}
} else {
if (!isa<UndefValue>(coords[i])) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceCoordinateTooMany);
}
}
}
}
static void ValidateCalcLODResourceDimensionCoord(CallInst *CI,
DXIL::ResourceKind resKind,
ArrayRef<Value *> coords,
ValidationContext &ValCtx) {
const unsigned kMaxNumDimCoords = 3;
unsigned numCoords = DxilResource::GetNumDimensionsForCalcLOD(resKind);
for (unsigned i = 0; i < kMaxNumDimCoords; i++) {
if (i < numCoords) {
if (isa<UndefValue>(coords[i])) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceCoordinateMiss);
}
} else {
if (!isa<UndefValue>(coords[i])) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceCoordinateTooMany);
}
}
}
}
static void ValidateResourceOffset(CallInst *CI, DXIL::ResourceKind resKind,
ArrayRef<Value *> offsets,
ValidationContext &ValCtx) {
const ShaderModel *pSM = ValCtx.DxilMod.GetShaderModel();
unsigned numOffsets = DxilResource::GetNumOffsets(resKind);
bool hasOffset = !isa<UndefValue>(offsets[0]);
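  // An undef first offset component means no offsets were supplied.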
auto validateOffset = [&](Value *offset) {
// 6.7 Advanced Textures allow programmable offsets
if (pSM->IsSM67Plus())
return;
    if (ConstantInt *cOffset = dyn_cast<ConstantInt>(offset)) {
      int immOffset = cOffset->getValue().getSExtValue();
      if (immOffset > 7 || immOffset < -8) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrTextureOffset);
}
} else {
ValCtx.EmitInstrError(CI, ValidationRule::InstrTextureOffset);
}
};
if (hasOffset) {
validateOffset(offsets[0]);
}
for (unsigned i = 1; i < offsets.size(); i++) {
if (i < numOffsets) {
if (hasOffset) {
if (isa<UndefValue>(offsets[i]))
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceOffsetMiss);
else
validateOffset(offsets[i]);
}
} else {
if (!isa<UndefValue>(offsets[i])) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceOffsetTooMany);
}
}
}
}
// Validate derivative and derivative dependent ops in CS/MS/AS
static void ValidateDerivativeOp(CallInst *CI, ValidationContext &ValCtx) {
const ShaderModel *pSM = ValCtx.DxilMod.GetShaderModel();
if (pSM && (pSM->IsMS() || pSM->IsAS() || pSM->IsCS()) && !pSM->IsSM66Plus())
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcodeInInvalidFunction,
{"Derivatives in CS/MS/AS", "Shader Model 6.6+"});
}
static void ValidateSampleInst(CallInst *CI, Value *srvHandle,
Value *samplerHandle, ArrayRef<Value *> coords,
ArrayRef<Value *> offsets, bool IsSampleC,
ValidationContext &ValCtx) {
if (!IsSampleC) {
if (GetSamplerKind(samplerHandle, ValCtx) != DXIL::SamplerKind::Default) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrSamplerModeForSample);
}
} else {
if (GetSamplerKind(samplerHandle, ValCtx) !=
DXIL::SamplerKind::Comparison) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrSamplerModeForSampleC);
}
}
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(srvHandle, compTy, resClass, ValCtx);
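  // Sampling requires a float-family component type; SM 6.7+ also permits
  // integer component types for non-comparison sampling.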
bool isSampleCompTy = compTy == DXIL::ComponentType::F32;
isSampleCompTy |= compTy == DXIL::ComponentType::SNormF32;
isSampleCompTy |= compTy == DXIL::ComponentType::UNormF32;
isSampleCompTy |= compTy == DXIL::ComponentType::F16;
isSampleCompTy |= compTy == DXIL::ComponentType::SNormF16;
isSampleCompTy |= compTy == DXIL::ComponentType::UNormF16;
const ShaderModel *pSM = ValCtx.DxilMod.GetShaderModel();
if (pSM->IsSM67Plus() && !IsSampleC) {
isSampleCompTy |= compTy == DXIL::ComponentType::I16;
isSampleCompTy |= compTy == DXIL::ComponentType::U16;
isSampleCompTy |= compTy == DXIL::ComponentType::I32;
isSampleCompTy |= compTy == DXIL::ComponentType::U32;
}
if (!isSampleCompTy) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrSampleCompType);
}
if (resClass != DXIL::ResourceClass::SRV) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceClassForSamplerGather);
}
ValidationRule rule = ValidationRule::InstrResourceKindForSample;
if (IsSampleC) {
rule = ValidationRule::InstrResourceKindForSampleC;
}
switch (resKind) {
case DXIL::ResourceKind::Texture1D:
case DXIL::ResourceKind::Texture1DArray:
case DXIL::ResourceKind::Texture2D:
case DXIL::ResourceKind::Texture2DArray:
case DXIL::ResourceKind::TextureCube:
case DXIL::ResourceKind::TextureCubeArray:
break;
case DXIL::ResourceKind::Texture3D:
if (IsSampleC) {
ValCtx.EmitInstrError(CI, rule);
}
break;
default:
ValCtx.EmitInstrError(CI, rule);
return;
}
// Coord match resource kind.
ValidateResourceCoord(CI, resKind, coords, ValCtx);
// Offset match resource kind.
ValidateResourceOffset(CI, resKind, offsets, ValCtx);
}
static void ValidateGather(CallInst *CI, Value *srvHandle, Value *samplerHandle,
ArrayRef<Value *> coords, ArrayRef<Value *> offsets,
bool IsSampleC, ValidationContext &ValCtx) {
if (!IsSampleC) {
if (GetSamplerKind(samplerHandle, ValCtx) != DXIL::SamplerKind::Default) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrSamplerModeForSample);
}
} else {
if (GetSamplerKind(samplerHandle, ValCtx) !=
DXIL::SamplerKind::Comparison) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrSamplerModeForSampleC);
}
}
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(srvHandle, compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::SRV) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceClassForSamplerGather);
return;
}
// Coord match resource kind.
ValidateResourceCoord(CI, resKind, coords, ValCtx);
// Offset match resource kind.
switch (resKind) {
case DXIL::ResourceKind::Texture2D:
case DXIL::ResourceKind::Texture2DArray: {
bool hasOffset = !isa<UndefValue>(offsets[0]);
if (hasOffset) {
if (isa<UndefValue>(offsets[1])) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceOffsetMiss);
}
}
} break;
case DXIL::ResourceKind::TextureCube:
case DXIL::ResourceKind::TextureCubeArray: {
if (!isa<UndefValue>(offsets[0])) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceOffsetTooMany);
}
if (!isa<UndefValue>(offsets[1])) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceOffsetTooMany);
}
} break;
default:
// Invalid resource type for gather.
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceKindForGather);
return;
}
}
static unsigned StoreValueToMask(ArrayRef<Value *> vals) {
unsigned mask = 0;
for (unsigned i = 0; i < 4; i++) {
if (!isa<UndefValue>(vals[i])) {
mask |= 1 << i;
}
}
return mask;
}
static int GetCBufSize(Value *cbHandle, ValidationContext &ValCtx) {
DxilResourceProperties RP = GetResourceFromHandle(cbHandle, ValCtx);
if (RP.getResourceClass() != DXIL::ResourceClass::CBuffer) {
ValCtx.EmitInstrError(cast<CallInst>(cbHandle),
ValidationRule::InstrCBufferClassForCBufferHandle);
return -1;
}
return RP.CBufferSizeInBytes;
}
// Make sure none of the handle arguments are undef or zero-initialized.
// Also reject any resource handles with invalid DXIL resource properties.
void ValidateHandleArgsForInstruction(CallInst *CI, DXIL::OpCode opcode,
ValidationContext &ValCtx) {
for (Value *op : CI->operands()) {
const Type *pHandleTy = ValCtx.HandleTy; // This is a resource handle
const Type *pNodeHandleTy = ValCtx.DxilMod.GetOP()->GetNodeHandleType();
const Type *pNodeRecordHandleTy =
ValCtx.DxilMod.GetOP()->GetNodeRecordHandleType();
const Type *argTy = op->getType();
if (argTy == pNodeHandleTy || argTy == pNodeRecordHandleTy ||
argTy == pHandleTy) {
if (isa<UndefValue>(op) || isa<ConstantAggregateZero>(op)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrNoReadingUninitialized);
} else if (argTy == pHandleTy) {
// GetResourceFromHandle will emit an error on an invalid handle
GetResourceFromHandle(op, ValCtx);
}
}
}
}
void ValidateHandleArgs(CallInst *CI, DXIL::OpCode opcode,
ValidationContext &ValCtx) {
switch (opcode) {
// TODO: add case DXIL::OpCode::IndexNodeRecordHandle:
case DXIL::OpCode::AnnotateHandle:
case DXIL::OpCode::AnnotateNodeHandle:
case DXIL::OpCode::AnnotateNodeRecordHandle:
case DXIL::OpCode::CreateHandleForLib:
// TODO: add custom validation for these intrinsics
break;
default:
ValidateHandleArgsForInstruction(CI, opcode, ValCtx);
break;
}
}
static unsigned GetNumVertices(DXIL::InputPrimitive inputPrimitive) {
const unsigned InputPrimitiveVertexTab[] = {
0, // Undefined = 0,
1, // Point = 1,
2, // Line = 2,
3, // Triangle = 3,
0, // Reserved4 = 4,
0, // Reserved5 = 5,
4, // LineWithAdjacency = 6,
6, // TriangleWithAdjacency = 7,
1, // ControlPointPatch1 = 8,
2, // ControlPointPatch2 = 9,
3, // ControlPointPatch3 = 10,
4, // ControlPointPatch4 = 11,
5, // ControlPointPatch5 = 12,
6, // ControlPointPatch6 = 13,
7, // ControlPointPatch7 = 14,
8, // ControlPointPatch8 = 15,
9, // ControlPointPatch9 = 16,
10, // ControlPointPatch10 = 17,
11, // ControlPointPatch11 = 18,
12, // ControlPointPatch12 = 19,
13, // ControlPointPatch13 = 20,
14, // ControlPointPatch14 = 21,
15, // ControlPointPatch15 = 22,
16, // ControlPointPatch16 = 23,
17, // ControlPointPatch17 = 24,
18, // ControlPointPatch18 = 25,
19, // ControlPointPatch19 = 26,
20, // ControlPointPatch20 = 27,
21, // ControlPointPatch21 = 28,
22, // ControlPointPatch22 = 29,
23, // ControlPointPatch23 = 30,
24, // ControlPointPatch24 = 31,
25, // ControlPointPatch25 = 32,
26, // ControlPointPatch26 = 33,
27, // ControlPointPatch27 = 34,
28, // ControlPointPatch28 = 35,
29, // ControlPointPatch29 = 36,
30, // ControlPointPatch30 = 37,
31, // ControlPointPatch31 = 38,
32, // ControlPointPatch32 = 39,
0, // LastEntry,
};
unsigned primitiveIdx = static_cast<unsigned>(inputPrimitive);
return InputPrimitiveVertexTab[primitiveIdx];
}
static void ValidateSignatureDxilOp(CallInst *CI, DXIL::OpCode opcode,
ValidationContext &ValCtx) {
Function *F = CI->getParent()->getParent();
DxilModule &DM = ValCtx.DxilMod;
bool bIsPatchConstantFunc = false;
if (!DM.HasDxilEntryProps(F)) {
auto it = ValCtx.PatchConstantFuncMap.find(F);
if (it == ValCtx.PatchConstantFuncMap.end()) {
// Missing entry props.
ValCtx.EmitInstrError(CI,
ValidationRule::InstrSignatureOperationNotInEntry);
return;
}
// Use hull entry instead of patch constant function.
F = it->second.front();
bIsPatchConstantFunc = true;
}
if (!ValCtx.HasEntryStatus(F)) {
return;
}
EntryStatus &Status = ValCtx.GetEntryStatus(F);
DxilEntryProps &EntryProps = DM.GetDxilEntryProps(F);
DxilFunctionProps &props = EntryProps.props;
DxilEntrySignature &S = EntryProps.sig;
switch (opcode) {
case DXIL::OpCode::LoadInput: {
Value *inputID = CI->getArgOperand(DXIL::OperandIndex::kLoadInputIDOpIdx);
DxilSignature &inputSig = S.InputSignature;
Value *row = CI->getArgOperand(DXIL::OperandIndex::kLoadInputRowOpIdx);
Value *col = CI->getArgOperand(DXIL::OperandIndex::kLoadInputColOpIdx);
ValidateSignatureAccess(CI, inputSig, inputID, row, col, Status, ValCtx);
    // Check vertexID use: VS/PS inputs are not arrayed, so vertexID must be
    // undef there; for GS/HS/DS a constant vertexID must be in range.
Value *vertexID =
CI->getArgOperand(DXIL::OperandIndex::kLoadInputVertexIDOpIdx);
bool usedVertexID = vertexID && !isa<UndefValue>(vertexID);
if (props.IsVS() || props.IsPS()) {
if (usedVertexID) {
// use vertexID in VS/PS input.
ValCtx.EmitInstrError(CI, ValidationRule::SmOperand);
return;
}
} else {
if (ConstantInt *cVertexID = dyn_cast<ConstantInt>(vertexID)) {
int immVertexID = cVertexID->getValue().getLimitedValue();
if (cVertexID->getValue().isNegative()) {
immVertexID = cVertexID->getValue().getSExtValue();
}
const int low = 0;
int high = 0;
if (props.IsGS()) {
DXIL::InputPrimitive inputPrimitive =
props.ShaderProps.GS.inputPrimitive;
high = GetNumVertices(inputPrimitive);
} else if (props.IsDS()) {
high = props.ShaderProps.DS.inputControlPoints;
} else if (props.IsHS()) {
high = props.ShaderProps.HS.inputControlPoints;
} else {
ValCtx.EmitInstrFormatError(CI,
ValidationRule::SmOpcodeInInvalidFunction,
{"LoadInput", "VS/HS/DS/GS/PS"});
}
if (immVertexID < low || immVertexID >= high) {
std::string range = std::to_string(low) + "~" + std::to_string(high);
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrOperandRange,
{"VertexID", range, std::to_string(immVertexID)});
}
}
}
} break;
case DXIL::OpCode::DomainLocation: {
Value *colValue =
CI->getArgOperand(DXIL::OperandIndex::kDomainLocationColOpIdx);
if (!isa<ConstantInt>(colValue)) {
// col must be const
ValCtx.EmitInstrFormatError(CI, ValidationRule::InstrOpConst,
{"Col", "DomainLocation"});
} else {
unsigned col = cast<ConstantInt>(colValue)->getLimitedValue();
if (col >= Status.domainLocSize) {
ValCtx.EmitInstrError(CI, ValidationRule::SmDomainLocationIdxOOB);
}
}
} break;
case DXIL::OpCode::StoreOutput:
case DXIL::OpCode::StoreVertexOutput:
case DXIL::OpCode::StorePrimitiveOutput: {
Value *outputID =
CI->getArgOperand(DXIL::OperandIndex::kStoreOutputIDOpIdx);
DxilSignature &outputSig = opcode == DXIL::OpCode::StorePrimitiveOutput
? S.PatchConstOrPrimSignature
: S.OutputSignature;
Value *row = CI->getArgOperand(DXIL::OperandIndex::kStoreOutputRowOpIdx);
Value *col = CI->getArgOperand(DXIL::OperandIndex::kStoreOutputColOpIdx);
ValidateSignatureAccess(CI, outputSig, outputID, row, col, Status, ValCtx);
} break;
case DXIL::OpCode::OutputControlPointID: {
// Only used in hull shader.
Function *func = CI->getParent()->getParent();
    // Make sure this is inside the hull shader entry function.
if (!(props.IsHS() && F == func)) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"OutputControlPointID", "hull function"});
}
} break;
case DXIL::OpCode::LoadOutputControlPoint: {
// Only used in patch constant function.
Function *func = CI->getParent()->getParent();
if (ValCtx.entryFuncCallSet.count(func) > 0) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcodeInInvalidFunction,
{"LoadOutputControlPoint", "PatchConstant function"});
}
Value *outputID =
CI->getArgOperand(DXIL::OperandIndex::kStoreOutputIDOpIdx);
DxilSignature &outputSig = S.OutputSignature;
Value *row = CI->getArgOperand(DXIL::OperandIndex::kStoreOutputRowOpIdx);
Value *col = CI->getArgOperand(DXIL::OperandIndex::kStoreOutputColOpIdx);
ValidateSignatureAccess(CI, outputSig, outputID, row, col, Status, ValCtx);
} break;
case DXIL::OpCode::StorePatchConstant: {
// Only used in patch constant function.
Function *func = CI->getParent()->getParent();
if (!bIsPatchConstantFunc) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcodeInInvalidFunction,
{"StorePatchConstant", "PatchConstant function"});
} else {
auto &hullShaders = ValCtx.PatchConstantFuncMap[func];
for (Function *F : hullShaders) {
EntryStatus &Status = ValCtx.GetEntryStatus(F);
DxilEntryProps &EntryProps = DM.GetDxilEntryProps(F);
DxilEntrySignature &S = EntryProps.sig;
Value *outputID =
CI->getArgOperand(DXIL::OperandIndex::kStoreOutputIDOpIdx);
DxilSignature &outputSig = S.PatchConstOrPrimSignature;
Value *row =
CI->getArgOperand(DXIL::OperandIndex::kStoreOutputRowOpIdx);
Value *col =
CI->getArgOperand(DXIL::OperandIndex::kStoreOutputColOpIdx);
ValidateSignatureAccess(CI, outputSig, outputID, row, col, Status,
ValCtx);
}
}
} break;
case DXIL::OpCode::Coverage:
Status.m_bCoverageIn = true;
break;
case DXIL::OpCode::InnerCoverage:
Status.m_bInnerCoverageIn = true;
break;
case DXIL::OpCode::ViewID:
Status.hasViewID = true;
break;
case DXIL::OpCode::EvalCentroid:
case DXIL::OpCode::EvalSampleIndex:
case DXIL::OpCode::EvalSnapped: {
    // Eval* opcodes share the same operand indices as LoadInput.
Value *inputID = CI->getArgOperand(DXIL::OperandIndex::kLoadInputIDOpIdx);
DxilSignature &inputSig = S.InputSignature;
Value *row = CI->getArgOperand(DXIL::OperandIndex::kLoadInputRowOpIdx);
Value *col = CI->getArgOperand(DXIL::OperandIndex::kLoadInputColOpIdx);
DxilSignatureElement *pSE = ValidateSignatureAccess(
CI, inputSig, inputID, row, col, Status, ValCtx);
if (pSE) {
switch (pSE->GetInterpolationMode()->GetKind()) {
case DXIL::InterpolationMode::Linear:
case DXIL::InterpolationMode::LinearNoperspective:
case DXIL::InterpolationMode::LinearCentroid:
case DXIL::InterpolationMode::LinearNoperspectiveCentroid:
case DXIL::InterpolationMode::LinearSample:
case DXIL::InterpolationMode::LinearNoperspectiveSample:
break;
default:
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrEvalInterpolationMode, {pSE->GetName()});
break;
}
if (pSE->GetSemantic()->GetKind() == DXIL::SemanticKind::Position) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrCannotPullPosition,
{ValCtx.DxilMod.GetShaderModel()->GetName()});
}
}
} break;
case DXIL::OpCode::AttributeAtVertex: {
Value *Attribute = CI->getArgOperand(DXIL::OperandIndex::kBinarySrc0OpIdx);
DxilSignature &inputSig = S.InputSignature;
Value *row = CI->getArgOperand(DXIL::OperandIndex::kLoadInputRowOpIdx);
Value *col = CI->getArgOperand(DXIL::OperandIndex::kLoadInputColOpIdx);
DxilSignatureElement *pSE = ValidateSignatureAccess(
CI, inputSig, Attribute, row, col, Status, ValCtx);
if (pSE && pSE->GetInterpolationMode()->GetKind() !=
hlsl::InterpolationMode::Kind::Constant) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrAttributeAtVertexNoInterpolation,
{pSE->GetName()});
}
} break;
case DXIL::OpCode::CutStream:
case DXIL::OpCode::EmitThenCutStream:
case DXIL::OpCode::EmitStream: {
if (props.IsGS()) {
auto &GS = props.ShaderProps.GS;
unsigned streamMask = 0;
for (size_t i = 0; i < _countof(GS.streamPrimitiveTopologies); ++i) {
if (GS.streamPrimitiveTopologies[i] !=
DXIL::PrimitiveTopology::Undefined) {
streamMask |= 1 << i;
}
}
Value *streamID =
CI->getArgOperand(DXIL::OperandIndex::kStreamEmitCutIDOpIdx);
if (ConstantInt *cStreamID = dyn_cast<ConstantInt>(streamID)) {
int immStreamID = cStreamID->getValue().getLimitedValue();
if (cStreamID->getValue().isNegative() || immStreamID >= 4) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrOperandRange,
{"StreamID", "0~4", std::to_string(immStreamID)});
} else {
unsigned immMask = 1 << immStreamID;
if ((streamMask & immMask) == 0) {
std::string range;
for (unsigned i = 0; i < 4; i++) {
if (streamMask & (1 << i)) {
range += std::to_string(i) + " ";
}
}
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrOperandRange,
{"StreamID", range, std::to_string(immStreamID)});
}
}
} else {
ValCtx.EmitInstrFormatError(CI, ValidationRule::InstrOpConst,
{"StreamID", "Emit/CutStream"});
}
} else {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"Emit/CutStream", "Geometry shader"});
}
} break;
case DXIL::OpCode::EmitIndices: {
if (!props.IsMS()) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"EmitIndices", "Mesh shader"});
}
} break;
case DXIL::OpCode::SetMeshOutputCounts: {
if (!props.IsMS()) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"SetMeshOutputCounts", "Mesh shader"});
}
} break;
case DXIL::OpCode::GetMeshPayload: {
if (!props.IsMS()) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"GetMeshPayload", "Mesh shader"});
}
} break;
case DXIL::OpCode::DispatchMesh: {
if (!props.IsAS()) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"DispatchMesh", "Amplification shader"});
}
} break;
default:
break;
}
if (Status.m_bCoverageIn && Status.m_bInnerCoverageIn) {
ValCtx.EmitInstrError(CI, ValidationRule::SmPSCoverageAndInnerCoverage);
}
}
static void ValidateImmOperandForMathDxilOp(CallInst *CI, DXIL::OpCode opcode,
ValidationContext &ValCtx) {
switch (opcode) {
// Imm input value validation.
case DXIL::OpCode::Asin: {
DxilInst_Asin I(CI);
if (ConstantFP *imm = dyn_cast<ConstantFP>(I.get_value())) {
if (imm->getValueAPF().isInfinity()) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrNoIndefiniteAsin);
}
}
} break;
case DXIL::OpCode::Acos: {
DxilInst_Acos I(CI);
if (ConstantFP *imm = dyn_cast<ConstantFP>(I.get_value())) {
if (imm->getValueAPF().isInfinity()) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrNoIndefiniteAcos);
}
}
} break;
case DXIL::OpCode::Log: {
DxilInst_Log I(CI);
if (ConstantFP *imm = dyn_cast<ConstantFP>(I.get_value())) {
if (imm->getValueAPF().isInfinity()) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrNoIndefiniteLog);
}
}
} break;
case DXIL::OpCode::DerivFineX:
case DXIL::OpCode::DerivFineY:
case DXIL::OpCode::DerivCoarseX:
case DXIL::OpCode::DerivCoarseY: {
Value *V = CI->getArgOperand(DXIL::OperandIndex::kUnarySrc0OpIdx);
if (ConstantFP *imm = dyn_cast<ConstantFP>(V)) {
if (imm->getValueAPF().isInfinity()) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrNoIndefiniteDsxy);
}
}
ValidateDerivativeOp(CI, ValCtx);
} break;
default:
break;
}
}
// Validate the type-defined mask against the store value mask, which
// indicates which components were defined. Returns true if the caller should
// continue validation.
static bool ValidateStorageMasks(Instruction *I, DXIL::OpCode opcode,
ConstantInt *mask, unsigned stValMask,
bool isTyped, ValidationContext &ValCtx) {
if (!mask) {
// Mask for buffer store should be immediate.
ValCtx.EmitInstrFormatError(I, ValidationRule::InstrOpConst,
{"Mask", hlsl::OP::GetOpCodeName(opcode)});
return false;
}
unsigned uMask = mask->getLimitedValue();
if (isTyped && uMask != 0xf) {
ValCtx.EmitInstrError(I, ValidationRule::InstrWriteMaskForTypedUAVStore);
}
// write mask must be contiguous (.x .xy .xyz or .xyzw)
if (!((uMask == 0xf) || (uMask == 0x7) || (uMask == 0x3) || (uMask == 0x1))) {
ValCtx.EmitInstrError(I, ValidationRule::InstrWriteMaskGapForUAV);
}
// If a bit is set in the uMask (expected values) that isn't set in stValMask
// (user provided values) then the user failed to define some of the output
// values.
if (uMask & ~stValMask)
ValCtx.EmitInstrError(I, ValidationRule::InstrUndefinedValueForUAVStore);
else if (uMask != stValMask)
ValCtx.EmitInstrFormatError(
I, ValidationRule::InstrWriteMaskMatchValueForUAVStore,
{std::to_string(uMask), std::to_string(stValMask)});
return true;
}
static void ValidateResourceDxilOp(CallInst *CI, DXIL::OpCode opcode,
ValidationContext &ValCtx) {
switch (opcode) {
case DXIL::OpCode::GetDimensions: {
DxilInst_GetDimensions getDim(CI);
Value *handle = getDim.get_handle();
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(handle, compTy, resClass, ValCtx);
// Check the result component use.
ResRetUsage usage;
CollectGetDimResRetUsage(usage, CI, ValCtx);
    // The mip-level operand is only meaningful for textures.
switch (resKind) {
case DXIL::ResourceKind::Texture1D:
if (usage.y) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"y", "Texture1D"});
}
if (usage.z) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"z", "Texture1D"});
}
break;
case DXIL::ResourceKind::Texture1DArray:
if (usage.z) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"z", "Texture1DArray"});
}
break;
case DXIL::ResourceKind::Texture2D:
if (usage.z) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"z", "Texture2D"});
}
break;
case DXIL::ResourceKind::Texture2DArray:
break;
case DXIL::ResourceKind::Texture2DMS:
if (usage.z) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"z", "Texture2DMS"});
}
break;
case DXIL::ResourceKind::Texture2DMSArray:
break;
case DXIL::ResourceKind::Texture3D:
break;
case DXIL::ResourceKind::TextureCube:
if (usage.z) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"z", "TextureCube"});
}
break;
case DXIL::ResourceKind::TextureCubeArray:
break;
case DXIL::ResourceKind::StructuredBuffer:
case DXIL::ResourceKind::RawBuffer:
case DXIL::ResourceKind::TypedBuffer:
case DXIL::ResourceKind::TBuffer: {
Value *mip = getDim.get_mipLevel();
if (!isa<UndefValue>(mip)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrMipLevelForGetDimension);
}
if (resKind != DXIL::ResourceKind::Invalid) {
if (usage.y || usage.z || usage.w) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"invalid", "resource"});
}
}
} break;
default: {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceKindForGetDim);
} break;
}
if (usage.status) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrUndefResultForGetDimension,
{"invalid", "resource"});
}
} break;
case DXIL::OpCode::CalculateLOD: {
DxilInst_CalculateLOD lod(CI);
Value *samplerHandle = lod.get_sampler();
DXIL::SamplerKind samplerKind = GetSamplerKind(samplerHandle, ValCtx);
if (samplerKind != DXIL::SamplerKind::Default) {
// After SM68, Comparison is supported.
if (!ValCtx.DxilMod.GetShaderModel()->IsSM68Plus() ||
samplerKind != DXIL::SamplerKind::Comparison)
ValCtx.EmitInstrError(CI, ValidationRule::InstrSamplerModeForLOD);
}
Value *handle = lod.get_handle();
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(handle, compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::SRV) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceClassForSamplerGather);
return;
}
// Coord match resource.
ValidateCalcLODResourceDimensionCoord(
CI, resKind, {lod.get_coord0(), lod.get_coord1(), lod.get_coord2()},
ValCtx);
switch (resKind) {
case DXIL::ResourceKind::Texture1D:
case DXIL::ResourceKind::Texture1DArray:
case DXIL::ResourceKind::Texture2D:
case DXIL::ResourceKind::Texture2DArray:
case DXIL::ResourceKind::Texture3D:
case DXIL::ResourceKind::TextureCube:
case DXIL::ResourceKind::TextureCubeArray:
break;
default:
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceKindForCalcLOD);
break;
}
ValidateDerivativeOp(CI, ValCtx);
} break;
case DXIL::OpCode::TextureGather: {
DxilInst_TextureGather gather(CI);
ValidateGather(CI, gather.get_srv(), gather.get_sampler(),
{gather.get_coord0(), gather.get_coord1(),
gather.get_coord2(), gather.get_coord3()},
{gather.get_offset0(), gather.get_offset1()},
/*IsSampleC*/ false, ValCtx);
} break;
case DXIL::OpCode::TextureGatherCmp: {
DxilInst_TextureGatherCmp gather(CI);
ValidateGather(CI, gather.get_srv(), gather.get_sampler(),
{gather.get_coord0(), gather.get_coord1(),
gather.get_coord2(), gather.get_coord3()},
{gather.get_offset0(), gather.get_offset1()},
/*IsSampleC*/ true, ValCtx);
} break;
case DXIL::OpCode::Sample: {
DxilInst_Sample sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ false, ValCtx);
ValidateDerivativeOp(CI, ValCtx);
} break;
case DXIL::OpCode::SampleCmp: {
DxilInst_SampleCmp sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ true, ValCtx);
ValidateDerivativeOp(CI, ValCtx);
} break;
case DXIL::OpCode::SampleCmpLevel: {
// sampler must be comparison mode.
DxilInst_SampleCmpLevel sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ true, ValCtx);
} break;
case DXIL::OpCode::SampleCmpLevelZero: {
// sampler must be comparison mode.
DxilInst_SampleCmpLevelZero sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ true, ValCtx);
} break;
case DXIL::OpCode::SampleBias: {
DxilInst_SampleBias sample(CI);
Value *bias = sample.get_bias();
if (ConstantFP *cBias = dyn_cast<ConstantFP>(bias)) {
float fBias = cBias->getValueAPF().convertToFloat();
if (fBias < DXIL::kMinMipLodBias || fBias > DXIL::kMaxMipLodBias) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrImmBiasForSampleB,
{std::to_string(DXIL::kMinMipLodBias),
std::to_string(DXIL::kMaxMipLodBias),
std::to_string(cBias->getValueAPF().convertToFloat())});
}
}
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ false, ValCtx);
ValidateDerivativeOp(CI, ValCtx);
} break;
case DXIL::OpCode::SampleCmpBias: {
DxilInst_SampleCmpBias sample(CI);
Value *bias = sample.get_bias();
if (ConstantFP *cBias = dyn_cast<ConstantFP>(bias)) {
float fBias = cBias->getValueAPF().convertToFloat();
if (fBias < DXIL::kMinMipLodBias || fBias > DXIL::kMaxMipLodBias) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrImmBiasForSampleB,
{std::to_string(DXIL::kMinMipLodBias),
std::to_string(DXIL::kMaxMipLodBias),
std::to_string(cBias->getValueAPF().convertToFloat())});
}
}
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ true, ValCtx);
ValidateDerivativeOp(CI, ValCtx);
} break;
case DXIL::OpCode::SampleGrad: {
DxilInst_SampleGrad sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ false, ValCtx);
} break;
case DXIL::OpCode::SampleCmpGrad: {
DxilInst_SampleCmpGrad sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ true, ValCtx);
} break;
case DXIL::OpCode::SampleLevel: {
DxilInst_SampleLevel sample(CI);
ValidateSampleInst(
CI, sample.get_srv(), sample.get_sampler(),
{sample.get_coord0(), sample.get_coord1(), sample.get_coord2(),
sample.get_coord3()},
{sample.get_offset0(), sample.get_offset1(), sample.get_offset2()},
/*IsSampleC*/ false, ValCtx);
} break;
case DXIL::OpCode::CheckAccessFullyMapped: {
Value *Src = CI->getArgOperand(DXIL::OperandIndex::kUnarySrc0OpIdx);
ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Src);
if (!EVI) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrCheckAccessFullyMapped);
} else {
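      // The operand must be the status field (kResRetStatusIndex) extracted
      // directly from a resource return struct.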
Value *V = EVI->getOperand(0);
bool isLegal = EVI->getNumIndices() == 1 &&
EVI->getIndices()[0] == DXIL::kResRetStatusIndex &&
ValCtx.DxilMod.GetOP()->IsResRetType(V->getType());
if (!isLegal) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrCheckAccessFullyMapped);
}
}
} break;
case DXIL::OpCode::BufferStore: {
DxilInst_BufferStore bufSt(CI);
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(bufSt.get_uav(), compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::UAV) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceClassForUAVStore);
}
ConstantInt *mask = dyn_cast<ConstantInt>(bufSt.get_mask());
unsigned stValMask =
StoreValueToMask({bufSt.get_value0(), bufSt.get_value1(),
bufSt.get_value2(), bufSt.get_value3()});
if (!ValidateStorageMasks(CI, opcode, mask, stValMask,
resKind == DXIL::ResourceKind::TypedBuffer ||
resKind == DXIL::ResourceKind::TBuffer,
ValCtx))
return;
Value *offset = bufSt.get_coord1();
switch (resKind) {
case DXIL::ResourceKind::RawBuffer:
if (!isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(
CI, ValidationRule::InstrCoordinateCountForRawTypedBuf);
}
break;
case DXIL::ResourceKind::TypedBuffer:
case DXIL::ResourceKind::TBuffer:
if (!isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(
CI, ValidationRule::InstrCoordinateCountForRawTypedBuf);
}
break;
case DXIL::ResourceKind::StructuredBuffer:
if (isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrCoordinateCountForStructBuf);
}
break;
default:
ValCtx.EmitInstrError(
CI, ValidationRule::InstrResourceKindForBufferLoadStore);
break;
}
} break;
case DXIL::OpCode::TextureStore: {
DxilInst_TextureStore texSt(CI);
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(texSt.get_srv(), compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::UAV) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceClassForUAVStore);
}
ConstantInt *mask = dyn_cast<ConstantInt>(texSt.get_mask());
unsigned stValMask =
StoreValueToMask({texSt.get_value0(), texSt.get_value1(),
texSt.get_value2(), texSt.get_value3()});
if (!ValidateStorageMasks(CI, opcode, mask, stValMask, true /*isTyped*/,
ValCtx))
return;
switch (resKind) {
case DXIL::ResourceKind::Texture1D:
case DXIL::ResourceKind::Texture1DArray:
case DXIL::ResourceKind::Texture2D:
case DXIL::ResourceKind::Texture2DArray:
case DXIL::ResourceKind::Texture2DMS:
case DXIL::ResourceKind::Texture2DMSArray:
case DXIL::ResourceKind::Texture3D:
break;
default:
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceKindForTextureStore);
break;
}
} break;
case DXIL::OpCode::BufferLoad: {
DxilInst_BufferLoad bufLd(CI);
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(bufLd.get_srv(), compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::SRV &&
resClass != DXIL::ResourceClass::UAV) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceClassForLoad);
}
Value *offset = bufLd.get_wot();
switch (resKind) {
case DXIL::ResourceKind::RawBuffer:
case DXIL::ResourceKind::TypedBuffer:
case DXIL::ResourceKind::TBuffer:
if (!isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(
CI, ValidationRule::InstrCoordinateCountForRawTypedBuf);
}
break;
case DXIL::ResourceKind::StructuredBuffer:
if (isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrCoordinateCountForStructBuf);
}
break;
default:
ValCtx.EmitInstrError(
CI, ValidationRule::InstrResourceKindForBufferLoadStore);
break;
}
} break;
case DXIL::OpCode::TextureLoad: {
DxilInst_TextureLoad texLd(CI);
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(texLd.get_srv(), compTy, resClass, ValCtx);
Value *mipLevel = texLd.get_mipLevelOrSampleCount();
if (resClass == DXIL::ResourceClass::UAV) {
bool noOffset = isa<UndefValue>(texLd.get_offset0());
noOffset &= isa<UndefValue>(texLd.get_offset1());
noOffset &= isa<UndefValue>(texLd.get_offset2());
if (!noOffset) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrOffsetOnUAVLoad);
}
if (!isa<UndefValue>(mipLevel)) {
if (resKind != DXIL::ResourceKind::Texture2DMS &&
resKind != DXIL::ResourceKind::Texture2DMSArray)
ValCtx.EmitInstrError(CI, ValidationRule::InstrMipOnUAVLoad);
}
} else {
if (resClass != DXIL::ResourceClass::SRV) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceClassForLoad);
}
}
switch (resKind) {
case DXIL::ResourceKind::Texture1D:
case DXIL::ResourceKind::Texture1DArray:
case DXIL::ResourceKind::Texture2D:
case DXIL::ResourceKind::Texture2DArray:
case DXIL::ResourceKind::Texture3D:
break;
case DXIL::ResourceKind::Texture2DMS:
case DXIL::ResourceKind::Texture2DMSArray: {
if (isa<UndefValue>(mipLevel)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrSampleIndexForLoad2DMS);
}
} break;
default:
ValCtx.EmitInstrError(CI,
ValidationRule::InstrResourceKindForTextureLoad);
return;
}
ValidateResourceOffset(
CI, resKind,
{texLd.get_offset0(), texLd.get_offset1(), texLd.get_offset2()},
ValCtx);
} break;
case DXIL::OpCode::CBufferLoad: {
DxilInst_CBufferLoad CBLoad(CI);
Value *regIndex = CBLoad.get_byteOffset();
if (ConstantInt *cIndex = dyn_cast<ConstantInt>(regIndex)) {
int offset = cIndex->getLimitedValue();
int size = GetCBufSize(CBLoad.get_handle(), ValCtx);
if (size > 0 && offset >= size) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrCBufferOutOfBound);
}
}
} break;
case DXIL::OpCode::CBufferLoadLegacy: {
DxilInst_CBufferLoadLegacy CBLoad(CI);
Value *regIndex = CBLoad.get_regIndex();
if (ConstantInt *cIndex = dyn_cast<ConstantInt>(regIndex)) {
      int offset = cIndex->getLimitedValue() * 16; // Each regIndex is a
                                                   // 16-byte row.
int size = GetCBufSize(CBLoad.get_handle(), ValCtx);
if (size > 0 && offset >= size) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrCBufferOutOfBound);
}
}
} break;
case DXIL::OpCode::RawBufferLoad: {
if (!ValCtx.DxilMod.GetShaderModel()->IsSM63Plus()) {
Type *Ty = OP::GetOverloadType(DXIL::OpCode::RawBufferLoad,
CI->getCalledFunction());
if (ValCtx.DL.getTypeAllocSizeInBits(Ty) > 32) {
ValCtx.EmitInstrError(CI, ValidationRule::Sm64bitRawBufferLoadStore);
}
}
DxilInst_RawBufferLoad bufLd(CI);
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(bufLd.get_srv(), compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::SRV &&
resClass != DXIL::ResourceClass::UAV) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceClassForLoad);
}
Value *offset = bufLd.get_elementOffset();
Value *align = bufLd.get_alignment();
unsigned alignSize = 0;
if (!isa<ConstantInt>(align)) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrCoordinateCountForRawTypedBuf);
} else {
alignSize = bufLd.get_alignment_val();
}
switch (resKind) {
case DXIL::ResourceKind::RawBuffer:
if (!isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(
CI, ValidationRule::InstrCoordinateCountForRawTypedBuf);
}
break;
case DXIL::ResourceKind::StructuredBuffer:
if (isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrCoordinateCountForStructBuf);
}
break;
default:
ValCtx.EmitInstrError(
CI, ValidationRule::InstrResourceKindForBufferLoadStore);
break;
}
} break;
case DXIL::OpCode::RawBufferStore: {
if (!ValCtx.DxilMod.GetShaderModel()->IsSM63Plus()) {
Type *Ty = OP::GetOverloadType(DXIL::OpCode::RawBufferStore,
CI->getCalledFunction());
if (ValCtx.DL.getTypeAllocSizeInBits(Ty) > 32) {
ValCtx.EmitInstrError(CI, ValidationRule::Sm64bitRawBufferLoadStore);
}
}
DxilInst_RawBufferStore bufSt(CI);
DXIL::ComponentType compTy;
DXIL::ResourceClass resClass;
DXIL::ResourceKind resKind =
GetResourceKindAndCompTy(bufSt.get_uav(), compTy, resClass, ValCtx);
if (resClass != DXIL::ResourceClass::UAV) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceClassForUAVStore);
}
ConstantInt *mask = dyn_cast<ConstantInt>(bufSt.get_mask());
unsigned stValMask =
StoreValueToMask({bufSt.get_value0(), bufSt.get_value1(),
bufSt.get_value2(), bufSt.get_value3()});
if (!ValidateStorageMasks(CI, opcode, mask, stValMask, false /*isTyped*/,
ValCtx))
return;
Value *offset = bufSt.get_elementOffset();
Value *align = bufSt.get_alignment();
unsigned alignSize = 0;
if (!isa<ConstantInt>(align)) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrCoordinateCountForRawTypedBuf);
} else {
alignSize = bufSt.get_alignment_val();
}
switch (resKind) {
case DXIL::ResourceKind::RawBuffer:
if (!isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(
CI, ValidationRule::InstrCoordinateCountForRawTypedBuf);
}
break;
case DXIL::ResourceKind::StructuredBuffer:
if (isa<UndefValue>(offset)) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrCoordinateCountForStructBuf);
}
break;
default:
ValCtx.EmitInstrError(
CI, ValidationRule::InstrResourceKindForBufferLoadStore);
break;
}
} break;
case DXIL::OpCode::TraceRay: {
DxilInst_TraceRay traceRay(CI);
Value *hdl = traceRay.get_AccelerationStructure();
DxilResourceProperties RP = ValCtx.GetResourceFromVal(hdl);
if (RP.getResourceClass() == DXIL::ResourceClass::Invalid) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceKindForTraceRay);
return;
}
if (RP.getResourceKind() != DXIL::ResourceKind::RTAccelerationStructure) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrResourceKindForTraceRay);
}
} break;
default:
break;
}
}
static void ValidateBarrierFlagArg(ValidationContext &ValCtx, CallInst *CI,
Value *Arg, unsigned validMask,
StringRef flagName, StringRef opName) {
if (ConstantInt *CArg = dyn_cast<ConstantInt>(Arg)) {
if ((CArg->getLimitedValue() & (uint32_t)(~validMask)) != 0) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::InstrBarrierFlagInvalid,
{flagName, opName});
}
} else {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrBarrierNonConstantFlagArgument);
}
}
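// Returns a printable name for a node launch type, for use in diagnostics.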
std::string GetLaunchTypeStr(DXIL::NodeLaunchType LT) {
switch (LT) {
case DXIL::NodeLaunchType::Broadcasting:
return "Broadcasting";
case DXIL::NodeLaunchType::Coalescing:
return "Coalescing";
case DXIL::NodeLaunchType::Thread:
return "Thread";
default:
return "Invalid";
}
}
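// Validates a DXIL operation call against the active shader profile:
// dispatches per-opcode checks and rejects operations that are illegal for
// the calling shader kind or node launch type.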
static void ValidateDxilOperationCallInProfile(CallInst *CI,
DXIL::OpCode opcode,
const ShaderModel *pSM,
ValidationContext &ValCtx) {
DXIL::ShaderKind shaderKind =
pSM ? pSM->GetKind() : DXIL::ShaderKind::Invalid;
llvm::Function *F = CI->getParent()->getParent();
DXIL::NodeLaunchType nodeLaunchType = DXIL::NodeLaunchType::Invalid;
if (DXIL::ShaderKind::Library == shaderKind) {
if (ValCtx.DxilMod.HasDxilFunctionProps(F)) {
DxilEntryProps &entryProps = ValCtx.DxilMod.GetDxilEntryProps(F);
shaderKind = ValCtx.DxilMod.GetDxilFunctionProps(F).shaderKind;
if (shaderKind == DXIL::ShaderKind::Node)
nodeLaunchType = entryProps.props.Node.LaunchType;
} else if (ValCtx.DxilMod.IsPatchConstantShader(F))
shaderKind = DXIL::ShaderKind::Hull;
}
  // These shader models are treated like compute.
bool isCSLike = shaderKind == DXIL::ShaderKind::Compute ||
shaderKind == DXIL::ShaderKind::Mesh ||
shaderKind == DXIL::ShaderKind::Amplification ||
shaderKind == DXIL::ShaderKind::Node;
// Is called from a library function
bool isLibFunc = shaderKind == DXIL::ShaderKind::Library;
ValidateHandleArgs(CI, opcode, ValCtx);
switch (opcode) {
// Imm input value validation.
case DXIL::OpCode::Asin:
case DXIL::OpCode::Acos:
case DXIL::OpCode::Log:
case DXIL::OpCode::DerivFineX:
case DXIL::OpCode::DerivFineY:
case DXIL::OpCode::DerivCoarseX:
case DXIL::OpCode::DerivCoarseY:
ValidateImmOperandForMathDxilOp(CI, opcode, ValCtx);
break;
// Resource validation.
case DXIL::OpCode::GetDimensions:
case DXIL::OpCode::CalculateLOD:
case DXIL::OpCode::TextureGather:
case DXIL::OpCode::TextureGatherCmp:
case DXIL::OpCode::Sample:
case DXIL::OpCode::SampleCmp:
case DXIL::OpCode::SampleCmpLevel:
case DXIL::OpCode::SampleCmpLevelZero:
case DXIL::OpCode::SampleBias:
case DXIL::OpCode::SampleGrad:
case DXIL::OpCode::SampleCmpBias:
case DXIL::OpCode::SampleCmpGrad:
case DXIL::OpCode::SampleLevel:
case DXIL::OpCode::CheckAccessFullyMapped:
case DXIL::OpCode::BufferStore:
case DXIL::OpCode::TextureStore:
case DXIL::OpCode::BufferLoad:
case DXIL::OpCode::TextureLoad:
case DXIL::OpCode::CBufferLoad:
case DXIL::OpCode::CBufferLoadLegacy:
case DXIL::OpCode::RawBufferLoad:
case DXIL::OpCode::RawBufferStore:
ValidateResourceDxilOp(CI, opcode, ValCtx);
break;
// Input output.
case DXIL::OpCode::LoadInput:
case DXIL::OpCode::DomainLocation:
case DXIL::OpCode::StoreOutput:
case DXIL::OpCode::StoreVertexOutput:
case DXIL::OpCode::StorePrimitiveOutput:
case DXIL::OpCode::OutputControlPointID:
case DXIL::OpCode::LoadOutputControlPoint:
case DXIL::OpCode::StorePatchConstant:
case DXIL::OpCode::Coverage:
case DXIL::OpCode::InnerCoverage:
case DXIL::OpCode::ViewID:
case DXIL::OpCode::EvalCentroid:
case DXIL::OpCode::EvalSampleIndex:
case DXIL::OpCode::EvalSnapped:
case DXIL::OpCode::AttributeAtVertex:
case DXIL::OpCode::EmitStream:
case DXIL::OpCode::EmitThenCutStream:
case DXIL::OpCode::CutStream:
ValidateSignatureDxilOp(CI, opcode, ValCtx);
break;
// Special.
case DXIL::OpCode::BufferUpdateCounter: {
DxilInst_BufferUpdateCounter updateCounter(CI);
Value *handle = updateCounter.get_uav();
DxilResourceProperties RP = ValCtx.GetResourceFromVal(handle);
if (!RP.isUAV()) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBufferUpdateCounterOnUAV);
}
if (!DXIL::IsStructuredBuffer(RP.getResourceKind())) {
ValCtx.EmitInstrError(CI, ValidationRule::SmCounterOnlyOnStructBuf);
}
if (!RP.Basic.SamplerCmpOrHasCounter) {
ValCtx.EmitInstrError(
CI, ValidationRule::InstrBufferUpdateCounterOnResHasCounter);
}
Value *inc = updateCounter.get_inc();
if (ConstantInt *cInc = dyn_cast<ConstantInt>(inc)) {
bool isInc = cInc->getLimitedValue() == 1;
if (!ValCtx.isLibProfile) {
auto it = ValCtx.HandleResIndexMap.find(handle);
if (it != ValCtx.HandleResIndexMap.end()) {
unsigned resIndex = it->second;
if (ValCtx.UavCounterIncMap.count(resIndex)) {
if (isInc != ValCtx.UavCounterIncMap[resIndex]) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrOnlyOneAllocConsume);
}
} else {
ValCtx.UavCounterIncMap[resIndex] = isInc;
}
}
} else {
// TODO: validate ValidationRule::InstrOnlyOneAllocConsume for lib
// profile.
}
} else {
ValCtx.EmitInstrFormatError(CI, ValidationRule::InstrOpConst,
{"inc", "BufferUpdateCounter"});
}
} break;
case DXIL::OpCode::Barrier: {
DxilInst_Barrier barrier(CI);
Value *mode = barrier.get_barrierMode();
ConstantInt *cMode = dyn_cast<ConstantInt>(mode);
if (!cMode) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::InstrOpConst,
{"Mode", "Barrier"});
return;
}
const unsigned uglobal =
static_cast<unsigned>(DXIL::BarrierMode::UAVFenceGlobal);
const unsigned g = static_cast<unsigned>(DXIL::BarrierMode::TGSMFence);
const unsigned ut =
static_cast<unsigned>(DXIL::BarrierMode::UAVFenceThreadGroup);
unsigned barrierMode = cMode->getLimitedValue();
if (isCSLike || isLibFunc) {
bool bHasUGlobal = barrierMode & uglobal;
bool bHasGroup = barrierMode & g;
bool bHasUGroup = barrierMode & ut;
if (bHasUGlobal && bHasUGroup) {
ValCtx.EmitInstrError(CI,
ValidationRule::InstrBarrierModeUselessUGroup);
}
if (!bHasUGlobal && !bHasGroup && !bHasUGroup) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBarrierModeNoMemory);
}
} else {
if (uglobal != barrierMode) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBarrierModeForNonCS);
}
}
} break;
case DXIL::OpCode::BarrierByMemoryType: {
DxilInst_BarrierByMemoryType DI(CI);
ValidateBarrierFlagArg(ValCtx, CI, DI.get_MemoryTypeFlags(),
(unsigned)hlsl::DXIL::MemoryTypeFlag::ValidMask,
"memory type", "BarrierByMemoryType");
ValidateBarrierFlagArg(ValCtx, CI, DI.get_SemanticFlags(),
(unsigned)hlsl::DXIL::BarrierSemanticFlag::ValidMask,
"semantic", "BarrierByMemoryType");
if (!isLibFunc && shaderKind != DXIL::ShaderKind::Node &&
OP::BarrierRequiresNode(CI)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBarrierRequiresNode);
}
if (!isCSLike && !isLibFunc && OP::BarrierRequiresGroup(CI)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBarrierModeForNonCS);
}
} break;
case DXIL::OpCode::BarrierByNodeRecordHandle:
case DXIL::OpCode::BarrierByMemoryHandle: {
std::string opName = opcode == DXIL::OpCode::BarrierByNodeRecordHandle
? "barrierByNodeRecordHandle"
: "barrierByMemoryHandle";
DxilInst_BarrierByMemoryHandle DIMH(CI);
ValidateBarrierFlagArg(ValCtx, CI, DIMH.get_SemanticFlags(),
(unsigned)hlsl::DXIL::BarrierSemanticFlag::ValidMask,
"semantic", opName);
if (!isLibFunc && shaderKind != DXIL::ShaderKind::Node &&
OP::BarrierRequiresNode(CI)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBarrierRequiresNode);
}
if (!isCSLike && !isLibFunc && OP::BarrierRequiresGroup(CI)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrBarrierModeForNonCS);
}
} break;
case DXIL::OpCode::CreateHandleForLib:
if (!ValCtx.isLibProfile) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"CreateHandleForLib", "Library"});
}
break;
case DXIL::OpCode::AtomicBinOp:
case DXIL::OpCode::AtomicCompareExchange: {
Type *pOverloadType = OP::GetOverloadType(opcode, CI->getCalledFunction());
if ((pOverloadType->isIntegerTy(64)) && !pSM->IsSM66Plus())
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcodeInInvalidFunction,
{"64-bit atomic operations", "Shader Model 6.6+"});
Value *Handle = CI->getOperand(DXIL::OperandIndex::kAtomicBinOpHandleOpIdx);
if (!isa<CallInst>(Handle) ||
ValCtx.GetResourceFromVal(Handle).getResourceClass() !=
DXIL::ResourceClass::UAV)
ValCtx.EmitInstrError(CI, ValidationRule::InstrAtomicIntrinNonUAV);
} break;
case DXIL::OpCode::CreateHandle:
if (ValCtx.isLibProfile) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::SmOpcodeInInvalidFunction,
{"CreateHandle", "non-library targets"});
}
// CreateHandle should not be used in SM 6.6 and above:
if (DXIL::CompareVersions(ValCtx.m_DxilMajor, ValCtx.m_DxilMinor, 1, 5) >
0) {
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcodeInInvalidFunction,
{"CreateHandle", "Shader model 6.5 and below"});
}
break;
case DXIL::OpCode::ThreadId: // SV_DispatchThreadID
if (shaderKind != DXIL::ShaderKind::Node) {
break;
}
if (nodeLaunchType == DXIL::NodeLaunchType::Broadcasting)
break;
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrSVConflictingLaunchMode,
{"ThreadId", "SV_DispatchThreadID", GetLaunchTypeStr(nodeLaunchType)});
break;
case DXIL::OpCode::GroupId: // SV_GroupId
if (shaderKind != DXIL::ShaderKind::Node) {
break;
}
if (nodeLaunchType == DXIL::NodeLaunchType::Broadcasting)
break;
ValCtx.EmitInstrFormatError(
CI, ValidationRule::InstrSVConflictingLaunchMode,
{"GroupId", "SV_GroupId", GetLaunchTypeStr(nodeLaunchType)});
break;
case DXIL::OpCode::ThreadIdInGroup: // SV_GroupThreadID
if (shaderKind != DXIL::ShaderKind::Node) {
break;
}
if (nodeLaunchType == DXIL::NodeLaunchType::Broadcasting ||
nodeLaunchType == DXIL::NodeLaunchType::Coalescing)
break;
ValCtx.EmitInstrFormatError(CI,
ValidationRule::InstrSVConflictingLaunchMode,
{"ThreadIdInGroup", "SV_GroupThreadID",
GetLaunchTypeStr(nodeLaunchType)});
break;
case DXIL::OpCode::FlattenedThreadIdInGroup: // SV_GroupIndex
if (shaderKind != DXIL::ShaderKind::Node) {
break;
}
if (nodeLaunchType == DXIL::NodeLaunchType::Broadcasting ||
nodeLaunchType == DXIL::NodeLaunchType::Coalescing)
break;
ValCtx.EmitInstrFormatError(CI,
ValidationRule::InstrSVConflictingLaunchMode,
{"FlattenedThreadIdInGroup", "SV_GroupIndex",
GetLaunchTypeStr(nodeLaunchType)});
break;
default:
// TODO: make sure every opcode is checked.
    // Skip opcodes that don't need special checks.
break;
}
}
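// Returns true if F can be a DXIL operation function: it must take at least
// the opcode argument and have a recognized dx.op name.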
static bool IsDxilFunction(llvm::Function *F) {
unsigned argSize = F->arg_size();
if (argSize < 1) {
// Cannot be a DXIL operation.
return false;
}
return OP::IsDxilOpFunc(F);
}
static bool IsLifetimeIntrinsic(llvm::Function *F) {
return (F->isIntrinsic() &&
(F->getIntrinsicID() == Intrinsic::lifetime_start ||
F->getIntrinsicID() == Intrinsic::lifetime_end));
}
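// Validates an external (declaration-only) function: all users must be calls,
// and for DXIL operations the opcode, overload type, function signature, and
// availability in the current shader profile are checked.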
static void ValidateExternalFunction(Function *F, ValidationContext &ValCtx) {
if (DXIL::CompareVersions(ValCtx.m_DxilMajor, ValCtx.m_DxilMinor, 1, 6) >=
0 &&
IsLifetimeIntrinsic(F)) {
// TODO: validate lifetime intrinsic users
return;
}
if (!IsDxilFunction(F) && !ValCtx.isLibProfile) {
ValCtx.EmitFnFormatError(F, ValidationRule::DeclDxilFnExtern,
{F->getName()});
return;
}
if (F->use_empty()) {
ValCtx.EmitFnFormatError(F, ValidationRule::DeclUsedExternalFunction,
{F->getName()});
return;
}
const ShaderModel *pSM = ValCtx.DxilMod.GetShaderModel();
OP *hlslOP = ValCtx.DxilMod.GetOP();
bool isDxilOp = OP::IsDxilOpFunc(F);
Type *voidTy = Type::getVoidTy(F->getContext());
for (User *user : F->users()) {
CallInst *CI = dyn_cast<CallInst>(user);
if (!CI) {
ValCtx.EmitFnFormatError(F, ValidationRule::DeclFnIsCalled,
{F->getName()});
continue;
}
    // Skip calls to external user-defined functions.
if (!isDxilOp)
continue;
Value *argOpcode = CI->getArgOperand(0);
ConstantInt *constOpcode = dyn_cast<ConstantInt>(argOpcode);
if (!constOpcode) {
// opcode not immediate; function body will validate this error.
continue;
}
unsigned opcode = constOpcode->getLimitedValue();
if (opcode >= (unsigned)DXIL::OpCode::NumOpCodes) {
// invalid opcode; function body will validate this error.
continue;
}
DXIL::OpCode dxilOpcode = (DXIL::OpCode)opcode;
    // In some cases, only the void overload is provided (void is exclusive of
    // all other overloads).
Function *dxilFunc;
if (hlslOP->IsOverloadLegal(dxilOpcode, voidTy)) {
dxilFunc = hlslOP->GetOpFunc(dxilOpcode, voidTy);
} else {
Type *Ty = OP::GetOverloadType(dxilOpcode, CI->getCalledFunction());
try {
if (!hlslOP->IsOverloadLegal(dxilOpcode, Ty)) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrOload);
continue;
}
} catch (...) {
ValCtx.EmitInstrError(CI, ValidationRule::InstrOload);
continue;
}
dxilFunc = hlslOP->GetOpFunc(dxilOpcode, Ty->getScalarType());
}
if (!dxilFunc) {
// Cannot find dxilFunction based on opcode and type.
ValCtx.EmitInstrError(CI, ValidationRule::InstrOload);
continue;
}
if (dxilFunc->getFunctionType() != F->getFunctionType()) {
ValCtx.EmitInstrFormatError(CI, ValidationRule::InstrCallOload,
{dxilFunc->getName()});
continue;
}
unsigned major = pSM->GetMajor();
unsigned minor = pSM->GetMinor();
if (ValCtx.isLibProfile) {
Function *callingFunction = CI->getParent()->getParent();
DXIL::ShaderKind SK = DXIL::ShaderKind::Library;
if (ValCtx.DxilMod.HasDxilFunctionProps(callingFunction))
SK = ValCtx.DxilMod.GetDxilFunctionProps(callingFunction).shaderKind;
else if (ValCtx.DxilMod.IsPatchConstantShader(callingFunction))
SK = DXIL::ShaderKind::Hull;
if (!ValidateOpcodeInProfile(dxilOpcode, SK, major, minor)) {
// Opcode not available in profile.
// produces: "lib_6_3(ps)", or "lib_6_3(anyhit)" for shader types
// Or: "lib_6_3(lib)" for library function
std::string shaderModel = pSM->GetName();
shaderModel += std::string("(") + ShaderModel::GetKindName(SK) + ")";
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcode,
{hlslOP->GetOpCodeName(dxilOpcode), shaderModel});
continue;
}
} else {
if (!ValidateOpcodeInProfile(dxilOpcode, pSM->GetKind(), major, minor)) {
// Opcode not available in profile.
ValCtx.EmitInstrFormatError(
CI, ValidationRule::SmOpcode,
{hlslOP->GetOpCodeName(dxilOpcode), pSM->GetName()});
continue;
}
}
    // Perform more detailed per-opcode checks.
ValidateDxilOperationCallInProfile(CI, dxilOpcode, pSM, ValCtx);
}
}
///////////////////////////////////////////////////////////////////////////////
// Instruction validation functions. //
static bool IsDxilBuiltinStructType(StructType *ST, hlsl::OP *hlslOP) {
if (ST == hlslOP->GetBinaryWithCarryType())
return true;
if (ST == hlslOP->GetBinaryWithTwoOutputsType())
return true;
if (ST == hlslOP->GetFourI32Type())
return true;
if (ST == hlslOP->GetFourI16Type())
return true;
if (ST == hlslOP->GetDimensionsType())
return true;
if (ST == hlslOP->GetHandleType())
return true;
if (ST == hlslOP->GetSamplePosType())
return true;
if (ST == hlslOP->GetSplitDoubleType())
return true;
unsigned EltNum = ST->getNumElements();
switch (EltNum) {
case 2:
case 4:
  case 8: { // 2 for doubles, 8 for halves.
Type *EltTy = ST->getElementType(0);
return ST == hlslOP->GetCBufferRetType(EltTy);
} break;
case 5: {
Type *EltTy = ST->getElementType(0);
return ST == hlslOP->GetResRetType(EltTy);
} break;
default:
return false;
}
}
// outer type may be: [ptr to][1 dim array of]( UDT struct | scalar )
// inner type (UDT struct member) may be: [N dim array of]( UDT struct | scalar )
// scalar type may be: ( float(16|32|64) | int(16|32|64) )
static bool ValidateType(Type *Ty, ValidationContext &ValCtx,
bool bInner = false) {
DXASSERT_NOMSG(Ty != nullptr);
if (Ty->isPointerTy()) {
Type *EltTy = Ty->getPointerElementType();
if (bInner || EltTy->isPointerTy()) {
ValCtx.EmitTypeError(Ty, ValidationRule::TypesNoPtrToPtr);
return false;
}
Ty = EltTy;
}
if (Ty->isArrayTy()) {
Type *EltTy = Ty->getArrayElementType();
if (!bInner && isa<ArrayType>(EltTy)) {
// Outermost array should be converted to single-dim,
// but arrays inside struct are allowed to be multi-dim
ValCtx.EmitTypeError(Ty, ValidationRule::TypesNoMultiDim);
return false;
}
while (EltTy->isArrayTy())
EltTy = EltTy->getArrayElementType();
Ty = EltTy;
}
if (Ty->isStructTy()) {
bool result = true;
StructType *ST = cast<StructType>(Ty);
StringRef Name = ST->getName();
if (Name.startswith("dx.")) {
// Allow handle type.
if (ValCtx.HandleTy == Ty)
return true;
hlsl::OP *hlslOP = ValCtx.DxilMod.GetOP();
if (IsDxilBuiltinStructType(ST, hlslOP)) {
ValCtx.EmitTypeError(Ty, ValidationRule::InstrDxilStructUser);
result = false;
}
ValCtx.EmitTypeError(Ty, ValidationRule::DeclDxilNsReserved);
result = false;
}
for (auto e : ST->elements()) {
if (!ValidateType(e, ValCtx, /*bInner*/ true)) {
result = false;
}
}
return result;
}
if (Ty->isFloatTy() || Ty->isHalfTy() || Ty->isDoubleTy()) {
return true;
}
if (Ty->isIntegerTy()) {
unsigned width = Ty->getIntegerBitWidth();
if (width != 1 && width != 8 && width != 16 && width != 32 && width != 64) {
ValCtx.EmitTypeError(Ty, ValidationRule::TypesIntWidth);
return false;
}
return true;
}
  // Lib profile allows all types except those that hit
  // ValidationRule::InstrDxilStructUser.
if (ValCtx.isLibProfile)
return true;
if (Ty->isVectorTy()) {
ValCtx.EmitTypeError(Ty, ValidationRule::TypesNoVector);
return false;
}
ValCtx.EmitTypeError(Ty, ValidationRule::TypesDefined);
return false;
}
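// Reads metadata operand 'index' of pMD as an unsigned integer into *pValue.
// Emits MetaWellFormed and returns false if the operand is malformed.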
static bool GetNodeOperandAsInt(ValidationContext &ValCtx, MDNode *pMD,
unsigned index, uint64_t *pValue) {
*pValue = 0;
  if (pMD->getNumOperands() <= index) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
return false;
}
ConstantAsMetadata *C = dyn_cast<ConstantAsMetadata>(pMD->getOperand(index));
if (C == nullptr) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
return false;
}
ConstantInt *CI = dyn_cast<ConstantInt>(C->getValue());
if (CI == nullptr) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
return false;
}
*pValue = CI->getValue().getZExtValue();
return true;
}
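// Returns true if I carries well-formed dx.precise metadata with value 1;
// malformed or out-of-range metadata is diagnosed.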
static bool IsPrecise(Instruction &I, ValidationContext &ValCtx) {
MDNode *pMD = I.getMetadata(DxilMDHelper::kDxilPreciseAttributeMDName);
if (pMD == nullptr) {
return false;
}
if (pMD->getNumOperands() != 1) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
return false;
}
uint64_t val;
if (!GetNodeOperandAsInt(ValCtx, pMD, 0, &val)) {
return false;
}
if (val == 1) {
return true;
}
if (val != 0) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaValueRange);
}
return false;
}
static bool IsValueMinPrec(DxilModule &DxilMod, Value *V) {
DXASSERT(DxilMod.GetGlobalFlags() & DXIL::kEnableMinPrecision,
"else caller didn't check - currently this path should never be hit "
"otherwise");
(void)(DxilMod);
Type *Ty = V->getType();
if (Ty->isIntegerTy()) {
return 16 == Ty->getIntegerBitWidth();
}
return Ty->isHalfTy();
}
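// Mesh shader checks: StoreVertexOutput, StorePrimitiveOutput, and
// EmitIndices must be dominated by a SetMeshOutputCounts call, and the
// declared payload size must cover the payload type while staying within the
// maximum payload limit.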
static void ValidateMsIntrinsics(Function *F, ValidationContext &ValCtx,
CallInst *setMeshOutputCounts,
CallInst *getMeshPayload) {
if (ValCtx.DxilMod.HasDxilFunctionProps(F)) {
DXIL::ShaderKind shaderKind =
ValCtx.DxilMod.GetDxilFunctionProps(F).shaderKind;
if (shaderKind != DXIL::ShaderKind::Mesh)
return;
} else {
return;
}
DominatorTreeAnalysis DTA;
DominatorTree DT = DTA.run(*F);
for (auto b = F->begin(), bend = F->end(); b != bend; ++b) {
bool foundSetMeshOutputCountsInCurrentBB = false;
for (auto i = b->begin(), iend = b->end(); i != iend; ++i) {
llvm::Instruction &I = *i;
// Calls to external functions.
CallInst *CI = dyn_cast<CallInst>(&I);
if (CI) {
Function *FCalled = CI->getCalledFunction();
if (!FCalled) {
ValCtx.EmitInstrError(&I, ValidationRule::InstrAllowed);
continue;
}
if (FCalled->isDeclaration()) {
// External function validation will diagnose.
if (!IsDxilFunction(FCalled)) {
continue;
}
if (CI == setMeshOutputCounts) {
foundSetMeshOutputCountsInCurrentBB = true;
}
Value *opcodeVal = CI->getOperand(0);
          ConstantInt *OpcodeConst = dyn_cast<ConstantInt>(opcodeVal);
          if (!OpcodeConst) {
            // A non-constant opcode is diagnosed during function body
            // validation.
            continue;
          }
          unsigned opcode = OpcodeConst->getLimitedValue();
DXIL::OpCode dxilOpcode = (DXIL::OpCode)opcode;
if (dxilOpcode == DXIL::OpCode::StoreVertexOutput ||
dxilOpcode == DXIL::OpCode::StorePrimitiveOutput ||
dxilOpcode == DXIL::OpCode::EmitIndices) {
if (setMeshOutputCounts == nullptr) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrMissingSetMeshOutputCounts);
} else if (!foundSetMeshOutputCountsInCurrentBB &&
!DT.dominates(setMeshOutputCounts->getParent(),
I.getParent())) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrNonDominatingSetMeshOutputCounts);
}
}
}
}
}
}
if (getMeshPayload) {
PointerType *payloadPTy = cast<PointerType>(getMeshPayload->getType());
StructType *payloadTy =
cast<StructType>(payloadPTy->getPointerElementType());
const DataLayout &DL = F->getParent()->getDataLayout();
unsigned payloadSize = DL.getTypeAllocSize(payloadTy);
DxilFunctionProps &prop = ValCtx.DxilMod.GetDxilFunctionProps(F);
if (prop.ShaderProps.MS.payloadSizeInBytes < payloadSize) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmMeshShaderPayloadSizeDeclared,
{F->getName(), std::to_string(payloadSize),
std::to_string(prop.ShaderProps.MS.payloadSizeInBytes)});
}
if (prop.ShaderProps.MS.payloadSizeInBytes > DXIL::kMaxMSASPayloadBytes) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmMeshShaderPayloadSize,
{F->getName(), std::to_string(prop.ShaderProps.MS.payloadSizeInBytes),
std::to_string(DXIL::kMaxMSASPayloadBytes)});
}
}
}
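// Amplification shader checks: DispatchMesh must be called exactly once, must
// post-dominate the entry block, and its payload must fit both the declared
// size and the maximum payload limit.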
static void ValidateAsIntrinsics(Function *F, ValidationContext &ValCtx,
CallInst *dispatchMesh) {
if (ValCtx.DxilMod.HasDxilFunctionProps(F)) {
DXIL::ShaderKind shaderKind =
ValCtx.DxilMod.GetDxilFunctionProps(F).shaderKind;
if (shaderKind != DXIL::ShaderKind::Amplification)
return;
if (dispatchMesh) {
DxilInst_DispatchMesh dispatchMeshCall(dispatchMesh);
Value *operandVal = dispatchMeshCall.get_payload();
Type *payloadTy = operandVal->getType();
const DataLayout &DL = F->getParent()->getDataLayout();
unsigned payloadSize = DL.getTypeAllocSize(payloadTy);
DxilFunctionProps &prop = ValCtx.DxilMod.GetDxilFunctionProps(F);
if (prop.ShaderProps.AS.payloadSizeInBytes < payloadSize) {
ValCtx.EmitInstrFormatError(
dispatchMesh,
ValidationRule::SmAmplificationShaderPayloadSizeDeclared,
{F->getName(), std::to_string(payloadSize),
std::to_string(prop.ShaderProps.AS.payloadSizeInBytes)});
}
if (prop.ShaderProps.AS.payloadSizeInBytes > DXIL::kMaxMSASPayloadBytes) {
ValCtx.EmitInstrFormatError(
dispatchMesh, ValidationRule::SmAmplificationShaderPayloadSize,
{F->getName(),
std::to_string(prop.ShaderProps.AS.payloadSizeInBytes),
std::to_string(DXIL::kMaxMSASPayloadBytes)});
}
}
} else {
return;
}
if (dispatchMesh == nullptr) {
ValCtx.EmitFnError(F, ValidationRule::InstrNotOnceDispatchMesh);
return;
}
PostDominatorTree PDT;
PDT.runOnFunction(*F);
if (!PDT.dominates(dispatchMesh->getParent(), &F->getEntryBlock())) {
ValCtx.EmitInstrError(dispatchMesh,
ValidationRule::InstrNonDominatingDispatchMesh);
}
Function *dispatchMeshFunc = dispatchMesh->getCalledFunction();
FunctionType *dispatchMeshFuncTy = dispatchMeshFunc->getFunctionType();
PointerType *payloadPTy =
cast<PointerType>(dispatchMeshFuncTy->getParamType(4));
StructType *payloadTy = cast<StructType>(payloadPTy->getPointerElementType());
const DataLayout &DL = F->getParent()->getDataLayout();
unsigned payloadSize = DL.getTypeAllocSize(payloadTy);
if (payloadSize > DXIL::kMaxMSASPayloadBytes) {
ValCtx.EmitInstrFormatError(
dispatchMesh, ValidationRule::SmAmplificationShaderPayloadSize,
{F->getName(), std::to_string(payloadSize),
std::to_string(DXIL::kMaxMSASPayloadBytes)});
}
}
static void ValidateControlFlowHint(BasicBlock &bb, ValidationContext &ValCtx) {
// Validate controlflow hint.
TerminatorInst *TI = bb.getTerminator();
if (!TI)
return;
MDNode *pNode = TI->getMetadata(DxilMDHelper::kDxilControlFlowHintMDName);
if (!pNode)
return;
if (pNode->getNumOperands() < 3)
return;
bool bHasBranch = false;
bool bHasFlatten = false;
bool bForceCase = false;
for (unsigned i = 2; i < pNode->getNumOperands(); i++) {
uint64_t value = 0;
if (GetNodeOperandAsInt(ValCtx, pNode, i, &value)) {
DXIL::ControlFlowHint hint = static_cast<DXIL::ControlFlowHint>(value);
switch (hint) {
case DXIL::ControlFlowHint::Flatten:
bHasFlatten = true;
break;
case DXIL::ControlFlowHint::Branch:
bHasBranch = true;
break;
case DXIL::ControlFlowHint::ForceCase:
bForceCase = true;
break;
default:
ValCtx.EmitMetaError(pNode, ValidationRule::MetaInvalidControlFlowHint);
}
}
}
if (bHasBranch && bHasFlatten) {
ValCtx.EmitMetaError(pNode, ValidationRule::MetaBranchFlatten);
}
if (bForceCase && !isa<SwitchInst>(TI)) {
ValCtx.EmitMetaError(pNode, ValidationRule::MetaForceCaseOnSwitch);
}
}
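// Recursively checks that a TBAA metadata node is well formed for each
// supported arity: a lone string, a node with a valid parent, or a node with
// a valid parent and a 0/1 points-to-constant-memory flag.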
static void ValidateTBAAMetadata(MDNode *Node, ValidationContext &ValCtx) {
switch (Node->getNumOperands()) {
case 1: {
if (Node->getOperand(0)->getMetadataID() != Metadata::MDStringKind) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
}
} break;
case 2: {
MDNode *rootNode = dyn_cast<MDNode>(Node->getOperand(1));
if (!rootNode) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
} else {
ValidateTBAAMetadata(rootNode, ValCtx);
}
} break;
case 3: {
MDNode *rootNode = dyn_cast<MDNode>(Node->getOperand(1));
if (!rootNode) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
} else {
ValidateTBAAMetadata(rootNode, ValCtx);
}
ConstantAsMetadata *pointsToConstMem =
dyn_cast<ConstantAsMetadata>(Node->getOperand(2));
if (!pointsToConstMem) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
} else {
ConstantInt *isConst =
dyn_cast<ConstantInt>(pointsToConstMem->getValue());
if (!isConst) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
} else if (isConst->getValue().getLimitedValue() > 1) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
}
}
} break;
default:
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
}
}
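// Checks llvm.loop metadata: a self-referential node optionally carrying one
// llvm.loop.unroll.{full,disable,count} hint, where the count form requires a
// constant count operand.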
static void ValidateLoopMetadata(MDNode *Node, ValidationContext &ValCtx) {
if (Node->getNumOperands() == 0 || Node->getNumOperands() > 2) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
return;
}
if (Node != Node->getOperand(0).get()) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
return;
}
if (Node->getNumOperands() == 1) {
return;
}
MDNode *LoopNode = dyn_cast<MDNode>(Node->getOperand(1).get());
if (!LoopNode) {
ValCtx.EmitMetaError(Node, ValidationRule::MetaWellFormed);
return;
}
if (LoopNode->getNumOperands() < 1 || LoopNode->getNumOperands() > 2) {
ValCtx.EmitMetaError(LoopNode, ValidationRule::MetaWellFormed);
return;
}
if (LoopNode->getOperand(0) == LoopNode) {
ValidateLoopMetadata(LoopNode, ValCtx);
return;
}
MDString *LoopStr = dyn_cast<MDString>(LoopNode->getOperand(0));
if (!LoopStr) {
ValCtx.EmitMetaError(LoopNode, ValidationRule::MetaWellFormed);
return;
}
StringRef Name = LoopStr->getString();
if (Name != "llvm.loop.unroll.full" && Name != "llvm.loop.unroll.disable" &&
Name != "llvm.loop.unroll.count") {
ValCtx.EmitMetaError(LoopNode, ValidationRule::MetaWellFormed);
return;
}
if (Name == "llvm.loop.unroll.count") {
if (LoopNode->getNumOperands() != 2) {
ValCtx.EmitMetaError(LoopNode, ValidationRule::MetaWellFormed);
return;
}
ConstantAsMetadata *CountNode =
dyn_cast<ConstantAsMetadata>(LoopNode->getOperand(1));
if (!CountNode) {
ValCtx.EmitMetaError(LoopNode, ValidationRule::MetaWellFormed);
} else {
ConstantInt *Count = dyn_cast<ConstantInt>(CountNode->getValue());
if (!Count) {
ValCtx.EmitMetaError(CountNode, ValidationRule::MetaWellFormed);
}
}
}
}
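// dx.nonuniform metadata is only allowed in library profiles, must be
// attached to a GEP, and must consist of a single constant with value 1.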
static void ValidateNonUniformMetadata(Instruction &I, MDNode *pMD,
ValidationContext &ValCtx) {
if (!ValCtx.isLibProfile) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaUsed);
}
if (!isa<GetElementPtrInst>(I)) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
}
if (pMD->getNumOperands() != 1) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
}
uint64_t val;
if (!GetNodeOperandAsInt(ValCtx, pMD, 0, &val)) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaWellFormed);
}
if (val != 1) {
ValCtx.EmitMetaError(pMD, ValidationRule::MetaValueRange);
}
}
static void ValidateInstructionMetadata(Instruction *I,
ValidationContext &ValCtx) {
SmallVector<std::pair<unsigned, MDNode *>, 2> MDNodes;
I->getAllMetadataOtherThanDebugLoc(MDNodes);
for (auto &MD : MDNodes) {
if (MD.first == ValCtx.kDxilControlFlowHintMDKind) {
if (!isa<TerminatorInst>(I)) {
ValCtx.EmitInstrError(
I, ValidationRule::MetaControlFlowHintNotOnControlFlow);
}
} else if (MD.first == ValCtx.kDxilPreciseMDKind) {
// Validated in IsPrecise.
} else if (MD.first == ValCtx.kLLVMLoopMDKind) {
ValidateLoopMetadata(MD.second, ValCtx);
} else if (MD.first == LLVMContext::MD_tbaa) {
ValidateTBAAMetadata(MD.second, ValCtx);
} else if (MD.first == LLVMContext::MD_range) {
// Validated in Verifier.cpp.
} else if (MD.first == LLVMContext::MD_noalias ||
MD.first == LLVMContext::MD_alias_scope) {
// noalias for DXIL validator >= 1.2
} else if (MD.first == ValCtx.kDxilNonUniformMDKind) {
ValidateNonUniformMetadata(*I, MD.second, ValCtx);
} else {
ValCtx.EmitMetaError(MD.second, ValidationRule::MetaUsed);
}
}
}
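// Checks function attributes: fp32-denorm-mode must carry a known value, and
// no unrecognized string attributes are permitted.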
static void ValidateFunctionAttribute(Function *F, ValidationContext &ValCtx) {
AttributeSet attrSet = F->getAttributes().getFnAttributes();
// fp32-denorm-mode
if (attrSet.hasAttribute(AttributeSet::FunctionIndex,
DXIL::kFP32DenormKindString)) {
Attribute attr = attrSet.getAttribute(AttributeSet::FunctionIndex,
DXIL::kFP32DenormKindString);
StringRef value = attr.getValueAsString();
if (!value.equals(DXIL::kFP32DenormValueAnyString) &&
!value.equals(DXIL::kFP32DenormValueFtzString) &&
!value.equals(DXIL::kFP32DenormValuePreserveString)) {
ValCtx.EmitFnAttributeError(F, attr.getKindAsString(),
attr.getValueAsString());
}
}
// TODO: If validating libraries, we should remove all unknown function
  // attributes. For each attribute, check that it is a known attribute.
for (unsigned I = 0, E = attrSet.getNumSlots(); I != E; ++I) {
for (auto AttrIter = attrSet.begin(I), AttrEnd = attrSet.end(I);
AttrIter != AttrEnd; ++AttrIter) {
if (!AttrIter->isStringAttribute()) {
continue;
}
StringRef kind = AttrIter->getKindAsString();
if (!kind.equals(DXIL::kFP32DenormKindString) &&
!kind.equals(DXIL::kWaveOpsIncludeHelperLanesString)) {
ValCtx.EmitFnAttributeError(F, AttrIter->getKindAsString(),
AttrIter->getValueAsString());
}
}
}
}
static void ValidateFunctionMetadata(Function *F, ValidationContext &ValCtx) {
SmallVector<std::pair<unsigned, MDNode *>, 2> MDNodes;
F->getAllMetadata(MDNodes);
for (auto &MD : MDNodes) {
ValCtx.EmitMetaError(MD.second, ValidationRule::MetaUsed);
}
}
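// A few extra LLVM instructions are tolerated in lib, mesh, and amplification
// profiles: vector element operations, and unreachable directly after a call
// to a noreturn DXIL function.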
static bool IsLLVMInstructionAllowedForLib(Instruction &I,
ValidationContext &ValCtx) {
if (!(ValCtx.isLibProfile || ValCtx.DxilMod.GetShaderModel()->IsMS() ||
ValCtx.DxilMod.GetShaderModel()->IsAS()))
return false;
switch (I.getOpcode()) {
case Instruction::InsertElement:
case Instruction::ExtractElement:
case Instruction::ShuffleVector:
return true;
case Instruction::Unreachable:
if (Instruction *Prev = I.getPrevNode()) {
if (CallInst *CI = dyn_cast<CallInst>(Prev)) {
Function *F = CI->getCalledFunction();
if (IsDxilFunction(F) &&
F->hasFnAttribute(Attribute::AttrKind::NoReturn)) {
return true;
}
}
}
return false;
default:
return false;
}
}
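// Validates every instruction in the function body: the allowed instruction
// set, DXIL op calls, undef and i8 operand rules, type validity, and
// per-opcode restrictions; also records mesh-related calls for the
// mesh/amplification checks run at the end.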
static void ValidateFunctionBody(Function *F, ValidationContext &ValCtx) {
bool SupportsMinPrecision =
ValCtx.DxilMod.GetGlobalFlags() & DXIL::kEnableMinPrecision;
bool SupportsLifetimeIntrinsics =
ValCtx.DxilMod.GetShaderModel()->IsSM66Plus();
SmallVector<CallInst *, 16> gradientOps;
SmallVector<CallInst *, 16> barriers;
CallInst *setMeshOutputCounts = nullptr;
CallInst *getMeshPayload = nullptr;
CallInst *dispatchMesh = nullptr;
hlsl::OP *hlslOP = ValCtx.DxilMod.GetOP();
for (auto b = F->begin(), bend = F->end(); b != bend; ++b) {
for (auto i = b->begin(), iend = b->end(); i != iend; ++i) {
llvm::Instruction &I = *i;
if (I.hasMetadata()) {
ValidateInstructionMetadata(&I, ValCtx);
}
// Instructions must be allowed.
if (!IsLLVMInstructionAllowed(I)) {
if (!IsLLVMInstructionAllowedForLib(I, ValCtx)) {
ValCtx.EmitInstrError(&I, ValidationRule::InstrAllowed);
continue;
}
}
// Instructions marked precise may not have minprecision arguments.
if (SupportsMinPrecision) {
if (IsPrecise(I, ValCtx)) {
for (auto &O : I.operands()) {
if (IsValueMinPrec(ValCtx.DxilMod, O)) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrMinPrecisionNotPrecise);
break;
}
}
}
}
// Calls to external functions.
CallInst *CI = dyn_cast<CallInst>(&I);
if (CI) {
        Function *FCalled = CI->getCalledFunction();
        if (!FCalled) {
          ValCtx.EmitInstrError(&I, ValidationRule::InstrAllowed);
          continue;
        }
        if (FCalled->isDeclaration()) {
// External function validation will diagnose.
if (!IsDxilFunction(FCalled)) {
continue;
}
Value *opcodeVal = CI->getOperand(0);
ConstantInt *OpcodeConst = dyn_cast<ConstantInt>(opcodeVal);
if (OpcodeConst == nullptr) {
ValCtx.EmitInstrFormatError(&I, ValidationRule::InstrOpConst,
{"Opcode", "DXIL operation"});
continue;
}
unsigned opcode = OpcodeConst->getLimitedValue();
if (opcode >= static_cast<unsigned>(DXIL::OpCode::NumOpCodes)) {
ValCtx.EmitInstrFormatError(
&I, ValidationRule::InstrIllegalDXILOpCode,
{std::to_string((unsigned)DXIL::OpCode::NumOpCodes),
std::to_string(opcode)});
continue;
}
DXIL::OpCode dxilOpcode = (DXIL::OpCode)opcode;
bool IllegalOpFunc = true;
for (auto &it : hlslOP->GetOpFuncList(dxilOpcode)) {
if (it.second == FCalled) {
IllegalOpFunc = false;
break;
}
}
if (IllegalOpFunc) {
ValCtx.EmitInstrFormatError(
&I, ValidationRule::InstrIllegalDXILOpFunction,
{FCalled->getName(), OP::GetOpCodeName(dxilOpcode)});
continue;
}
if (OP::IsDxilOpGradient(dxilOpcode)) {
gradientOps.push_back(CI);
}
if (dxilOpcode == DXIL::OpCode::Barrier) {
barriers.push_back(CI);
}
// External function validation will check the parameter
// list. This function will check that the call does not
// violate any rules.
if (dxilOpcode == DXIL::OpCode::SetMeshOutputCounts) {
// validate the call count of SetMeshOutputCounts
if (setMeshOutputCounts != nullptr) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrMultipleSetMeshOutputCounts);
}
setMeshOutputCounts = CI;
}
if (dxilOpcode == DXIL::OpCode::GetMeshPayload) {
// validate the call count of GetMeshPayload
if (getMeshPayload != nullptr) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrMultipleGetMeshPayload);
}
getMeshPayload = CI;
}
if (dxilOpcode == DXIL::OpCode::DispatchMesh) {
// validate the call count of DispatchMesh
if (dispatchMesh != nullptr) {
ValCtx.EmitInstrError(&I,
ValidationRule::InstrNotOnceDispatchMesh);
}
dispatchMesh = CI;
}
}
continue;
}
for (Value *op : I.operands()) {
if (isa<UndefValue>(op)) {
bool legalUndef = isa<PHINode>(&I);
if (isa<InsertElementInst>(&I)) {
legalUndef = op == I.getOperand(0);
}
if (isa<ShuffleVectorInst>(&I)) {
legalUndef = op == I.getOperand(1);
}
if (isa<StoreInst>(&I)) {
legalUndef = op == I.getOperand(0);
}
if (!legalUndef)
ValCtx.EmitInstrError(&I,
ValidationRule::InstrNoReadingUninitialized);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(op)) {
for (Value *opCE : CE->operands()) {
if (isa<UndefValue>(opCE)) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrNoReadingUninitialized);
}
}
}
if (IntegerType *IT = dyn_cast<IntegerType>(op->getType())) {
if (IT->getBitWidth() == 8) {
// We always fail if we see i8 as operand type of a non-lifetime
// instruction.
ValCtx.EmitInstrError(&I, ValidationRule::TypesI8);
}
}
}
Type *Ty = I.getType();
if (isa<PointerType>(Ty))
Ty = Ty->getPointerElementType();
while (isa<ArrayType>(Ty))
Ty = Ty->getArrayElementType();
if (IntegerType *IT = dyn_cast<IntegerType>(Ty)) {
if (IT->getBitWidth() == 8) {
// Allow i8* cast for llvm.lifetime.* intrinsics.
if (!SupportsLifetimeIntrinsics || !isa<BitCastInst>(I) ||
!onlyUsedByLifetimeMarkers(&I)) {
ValCtx.EmitInstrError(&I, ValidationRule::TypesI8);
}
}
}
unsigned opcode = I.getOpcode();
switch (opcode) {
case Instruction::Alloca: {
AllocaInst *AI = cast<AllocaInst>(&I);
// TODO: validate address space and alignment
Type *Ty = AI->getAllocatedType();
if (!ValidateType(Ty, ValCtx)) {
continue;
}
} break;
case Instruction::ExtractValue: {
ExtractValueInst *EV = cast<ExtractValueInst>(&I);
Type *Ty = EV->getAggregateOperand()->getType();
if (StructType *ST = dyn_cast<StructType>(Ty)) {
Value *Agg = EV->getAggregateOperand();
if (!isa<AtomicCmpXchgInst>(Agg) &&
!IsDxilBuiltinStructType(ST, ValCtx.DxilMod.GetOP())) {
ValCtx.EmitInstrError(EV, ValidationRule::InstrExtractValue);
}
} else {
ValCtx.EmitInstrError(EV, ValidationRule::InstrExtractValue);
}
} break;
case Instruction::Load: {
Type *Ty = I.getType();
if (!ValidateType(Ty, ValCtx)) {
continue;
}
} break;
case Instruction::Store: {
StoreInst *SI = cast<StoreInst>(&I);
Type *Ty = SI->getValueOperand()->getType();
if (!ValidateType(Ty, ValCtx)) {
continue;
}
} break;
case Instruction::GetElementPtr: {
Type *Ty = I.getType()->getPointerElementType();
if (!ValidateType(Ty, ValCtx)) {
continue;
}
GetElementPtrInst *GEP = cast<GetElementPtrInst>(&I);
bool allImmIndex = true;
for (auto Idx = GEP->idx_begin(), E = GEP->idx_end(); Idx != E; Idx++) {
if (!isa<ConstantInt>(Idx)) {
allImmIndex = false;
break;
}
}
if (allImmIndex) {
const DataLayout &DL = ValCtx.DL;
Value *Ptr = GEP->getPointerOperand();
unsigned size =
DL.getTypeAllocSize(Ptr->getType()->getPointerElementType());
unsigned valSize =
DL.getTypeAllocSize(GEP->getType()->getPointerElementType());
SmallVector<Value *, 8> Indices(GEP->idx_begin(), GEP->idx_end());
unsigned offset =
DL.getIndexedOffset(GEP->getPointerOperandType(), Indices);
if ((offset + valSize) > size) {
ValCtx.EmitInstrError(GEP, ValidationRule::InstrInBoundsAccess);
}
}
} break;
case Instruction::SDiv: {
BinaryOperator *BO = cast<BinaryOperator>(&I);
Value *V = BO->getOperand(1);
if (ConstantInt *imm = dyn_cast<ConstantInt>(V)) {
if (imm->getValue().getLimitedValue() == 0) {
ValCtx.EmitInstrError(BO, ValidationRule::InstrNoIDivByZero);
}
}
} break;
case Instruction::UDiv: {
BinaryOperator *BO = cast<BinaryOperator>(&I);
Value *V = BO->getOperand(1);
if (ConstantInt *imm = dyn_cast<ConstantInt>(V)) {
if (imm->getValue().getLimitedValue() == 0) {
ValCtx.EmitInstrError(BO, ValidationRule::InstrNoUDivByZero);
}
}
} break;
case Instruction::AddrSpaceCast: {
AddrSpaceCastInst *Cast = cast<AddrSpaceCastInst>(&I);
unsigned ToAddrSpace = Cast->getType()->getPointerAddressSpace();
unsigned FromAddrSpace =
Cast->getOperand(0)->getType()->getPointerAddressSpace();
if (ToAddrSpace != DXIL::kGenericPointerAddrSpace &&
FromAddrSpace != DXIL::kGenericPointerAddrSpace) {
ValCtx.EmitInstrError(Cast,
ValidationRule::InstrNoGenericPtrAddrSpaceCast);
}
} break;
case Instruction::BitCast: {
BitCastInst *Cast = cast<BitCastInst>(&I);
Type *FromTy = Cast->getOperand(0)->getType();
Type *ToTy = Cast->getType();
// Allow i8* cast for llvm.lifetime.* intrinsics.
if (SupportsLifetimeIntrinsics &&
ToTy == Type::getInt8PtrTy(ToTy->getContext()))
continue;
if (isa<PointerType>(FromTy)) {
FromTy = FromTy->getPointerElementType();
ToTy = ToTy->getPointerElementType();
unsigned FromSize = ValCtx.DL.getTypeAllocSize(FromTy);
unsigned ToSize = ValCtx.DL.getTypeAllocSize(ToTy);
if (FromSize != ToSize) {
ValCtx.EmitInstrError(Cast, ValidationRule::InstrPtrBitCast);
continue;
}
while (isa<ArrayType>(FromTy)) {
FromTy = FromTy->getArrayElementType();
}
while (isa<ArrayType>(ToTy)) {
ToTy = ToTy->getArrayElementType();
}
}
if ((isa<StructType>(FromTy) || isa<StructType>(ToTy)) &&
!ValCtx.isLibProfile) {
ValCtx.EmitInstrError(Cast, ValidationRule::InstrStructBitCast);
continue;
}
bool IsMinPrecisionTy = (ValCtx.DL.getTypeStoreSize(FromTy) < 4 ||
ValCtx.DL.getTypeStoreSize(ToTy) < 4) &&
ValCtx.DxilMod.GetUseMinPrecision();
if (IsMinPrecisionTy) {
ValCtx.EmitInstrError(Cast, ValidationRule::InstrMinPrecisonBitCast);
}
} break;
case Instruction::AtomicCmpXchg:
case Instruction::AtomicRMW: {
Value *Ptr = I.getOperand(AtomicRMWInst::getPointerOperandIndex());
PointerType *ptrType = cast<PointerType>(Ptr->getType());
Type *elType = ptrType->getElementType();
const ShaderModel *pSM = ValCtx.DxilMod.GetShaderModel();
if ((elType->isIntegerTy(64)) && !pSM->IsSM66Plus())
ValCtx.EmitInstrFormatError(
&I, ValidationRule::SmOpcodeInInvalidFunction,
{"64-bit atomic operations", "Shader Model 6.6+"});
if (ptrType->getAddressSpace() != DXIL::kTGSMAddrSpace &&
ptrType->getAddressSpace() != DXIL::kNodeRecordAddrSpace)
ValCtx.EmitInstrError(
&I, ValidationRule::InstrAtomicOpNonGroupsharedOrRecord);
// Drill through GEP and bitcasts
while (true) {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
Ptr = GEP->getPointerOperand();
continue;
}
if (BitCastInst *BC = dyn_cast<BitCastInst>(Ptr)) {
Ptr = BC->getOperand(0);
continue;
}
break;
}
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
if (GV->isConstant())
ValCtx.EmitInstrError(&I, ValidationRule::InstrAtomicConst);
}
} break;
}
if (PointerType *PT = dyn_cast<PointerType>(I.getType())) {
if (PT->getAddressSpace() == DXIL::kTGSMAddrSpace) {
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
Value *Ptr = GEP->getPointerOperand();
// Allow inner constant GEP
if (isa<ConstantExpr>(Ptr) && isa<GEPOperator>(Ptr))
Ptr = cast<GEPOperator>(Ptr)->getPointerOperand();
if (!isa<GlobalVariable>(Ptr)) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrFailToResloveTGSMPointer);
}
} else if (BitCastInst *BCI = dyn_cast<BitCastInst>(&I)) {
Value *Ptr = BCI->getOperand(0);
// Allow inner constant GEP
if (isa<ConstantExpr>(Ptr) && isa<GEPOperator>(Ptr))
Ptr = cast<GEPOperator>(Ptr)->getPointerOperand();
if (!isa<GetElementPtrInst>(Ptr) && !isa<GlobalVariable>(Ptr)) {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrFailToResloveTGSMPointer);
}
} else {
ValCtx.EmitInstrError(
&I, ValidationRule::InstrFailToResloveTGSMPointer);
}
}
}
}
ValidateControlFlowHint(*b, ValCtx);
}
ValidateMsIntrinsics(F, ValCtx, setMeshOutputCounts, getMeshPayload);
ValidateAsIntrinsics(F, ValCtx, dispatchMesh);
}
static void ValidateNodeInputRecord(Function *F, ValidationContext &ValCtx) {
  // If there are no function props or the function is not a node shader,
  // there is nothing to do here.
if (!ValCtx.DxilMod.HasDxilFunctionProps(F))
return;
auto &props = ValCtx.DxilMod.GetDxilFunctionProps(F);
if (!props.IsNode())
return;
if (props.InputNodes.size() > 1) {
ValCtx.EmitFnFormatError(
F, ValidationRule::DeclMultipleNodeInputs,
{F->getName(), std::to_string(props.InputNodes.size())});
}
for (auto &input : props.InputNodes) {
if (!input.Flags.RecordTypeMatchesLaunchType(props.Node.LaunchType)) {
// We allow EmptyNodeInput here, as that may have been added implicitly
// if there was no input specified
if (input.Flags.IsEmptyInput())
continue;
llvm::StringRef validInputs = "";
switch (props.Node.LaunchType) {
case DXIL::NodeLaunchType::Broadcasting:
validInputs = "{RW}DispatchNodeInputRecord";
break;
case DXIL::NodeLaunchType::Coalescing:
validInputs = "{RW}GroupNodeInputRecords or EmptyNodeInput";
break;
case DXIL::NodeLaunchType::Thread:
validInputs = "{RW}ThreadNodeInputRecord";
break;
default:
llvm_unreachable("invalid launch type");
}
ValCtx.EmitFnFormatError(
F, ValidationRule::DeclNodeLaunchInputType,
{ShaderModel::GetNodeLaunchTypeName(props.Node.LaunchType),
F->getName(), validInputs});
}
}
}
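// Validates a single function: external declarations, entry point argument
// and return type rules, node input records, the function body, attributes,
// and metadata.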
static void ValidateFunction(Function &F, ValidationContext &ValCtx) {
if (F.isDeclaration()) {
ValidateExternalFunction(&F, ValCtx);
if (F.isIntrinsic() || IsDxilFunction(&F))
return;
} else {
DXIL::ShaderKind shaderKind = DXIL::ShaderKind::Library;
bool isShader = ValCtx.DxilMod.HasDxilFunctionProps(&F);
unsigned numUDTShaderArgs = 0;
if (isShader) {
shaderKind = ValCtx.DxilMod.GetDxilFunctionProps(&F).shaderKind;
switch (shaderKind) {
case DXIL::ShaderKind::AnyHit:
case DXIL::ShaderKind::ClosestHit:
numUDTShaderArgs = 2;
break;
case DXIL::ShaderKind::Miss:
case DXIL::ShaderKind::Callable:
numUDTShaderArgs = 1;
break;
case DXIL::ShaderKind::Compute: {
DxilModule &DM = ValCtx.DxilMod;
if (DM.HasDxilEntryProps(&F)) {
DxilEntryProps &entryProps = DM.GetDxilEntryProps(&F);
// Check that compute has no node metadata
if (entryProps.props.IsNode()) {
ValCtx.EmitFnFormatError(&F, ValidationRule::MetaComputeWithNode,
{F.getName()});
}
}
break;
}
default:
break;
}
} else {
isShader = ValCtx.DxilMod.IsPatchConstantShader(&F);
}
    // Entry functions should not have parameters.
if (isShader && 0 == numUDTShaderArgs && !F.arg_empty())
ValCtx.EmitFnFormatError(&F, ValidationRule::FlowFunctionCall,
{F.getName()});
// Shader functions should return void.
if (isShader && !F.getReturnType()->isVoidTy())
ValCtx.EmitFnFormatError(&F, ValidationRule::DeclShaderReturnVoid,
{F.getName()});
auto ArgFormatError = [&](Function &F, Argument &arg, ValidationRule rule) {
if (arg.hasName())
ValCtx.EmitFnFormatError(&F, rule, {arg.getName().str(), F.getName()});
else
ValCtx.EmitFnFormatError(&F, rule,
{std::to_string(arg.getArgNo()), F.getName()});
};
unsigned numArgs = 0;
for (auto &arg : F.args()) {
Type *argTy = arg.getType();
if (argTy->isPointerTy())
argTy = argTy->getPointerElementType();
numArgs++;
if (numUDTShaderArgs) {
if (arg.getArgNo() >= numUDTShaderArgs) {
ArgFormatError(F, arg, ValidationRule::DeclExtraArgs);
} else if (!argTy->isStructTy()) {
switch (shaderKind) {
case DXIL::ShaderKind::Callable:
ArgFormatError(F, arg, ValidationRule::DeclParamStruct);
break;
default:
ArgFormatError(F, arg,
arg.getArgNo() == 0
? ValidationRule::DeclPayloadStruct
: ValidationRule::DeclAttrStruct);
}
}
continue;
}
while (argTy->isArrayTy()) {
argTy = argTy->getArrayElementType();
}
if (argTy->isStructTy() && !ValCtx.isLibProfile) {
ArgFormatError(F, arg, ValidationRule::DeclFnFlattenParam);
break;
}
}
if (numArgs < numUDTShaderArgs && shaderKind != DXIL::ShaderKind::Node) {
StringRef argType[2] = {
shaderKind == DXIL::ShaderKind::Callable ? "params" : "payload",
"attributes"};
for (unsigned i = numArgs; i < numUDTShaderArgs; i++) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::DeclShaderMissingArg,
{ShaderModel::GetKindName(shaderKind), F.getName(), argType[i]});
}
}
if (ValCtx.DxilMod.HasDxilFunctionProps(&F) &&
ValCtx.DxilMod.GetDxilFunctionProps(&F).IsNode()) {
ValidateNodeInputRecord(&F, ValCtx);
}
ValidateFunctionBody(&F, ValCtx);
}
// function params & return type must not contain resources
if (dxilutil::ContainsHLSLObjectType(F.getReturnType())) {
ValCtx.EmitFnFormatError(&F, ValidationRule::DeclResourceInFnSig,
{F.getName()});
return;
}
for (auto &Arg : F.args()) {
if (dxilutil::ContainsHLSLObjectType(Arg.getType())) {
ValCtx.EmitFnFormatError(&F, ValidationRule::DeclResourceInFnSig,
{F.getName()});
return;
}
}
// TODO: Remove attribute for lib?
if (!ValCtx.isLibProfile)
ValidateFunctionAttribute(&F, ValCtx);
if (F.hasMetadata()) {
ValidateFunctionMetadata(&F, ValCtx);
}
}
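// Checks that external globals have no instruction users and that internal
// globals (statics and groupshared) are used and have valid types.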
static void ValidateGlobalVariable(GlobalVariable &GV,
ValidationContext &ValCtx) {
bool isInternalGV =
dxilutil::IsStaticGlobal(&GV) || dxilutil::IsSharedMemoryGlobal(&GV);
if (ValCtx.isLibProfile) {
auto isCBufferGlobal =
[&](const std::vector<std::unique_ptr<DxilCBuffer>> &ResTab) -> bool {
for (auto &Res : ResTab)
if (Res->GetGlobalSymbol() == &GV)
return true;
return false;
};
auto isResourceGlobal =
[&](const std::vector<std::unique_ptr<DxilResource>> &ResTab) -> bool {
for (auto &Res : ResTab)
if (Res->GetGlobalSymbol() == &GV)
return true;
return false;
};
auto isSamplerGlobal =
[&](const std::vector<std::unique_ptr<DxilSampler>> &ResTab) -> bool {
for (auto &Res : ResTab)
if (Res->GetGlobalSymbol() == &GV)
return true;
return false;
};
bool isRes = isCBufferGlobal(ValCtx.DxilMod.GetCBuffers());
isRes |= isResourceGlobal(ValCtx.DxilMod.GetUAVs());
isRes |= isResourceGlobal(ValCtx.DxilMod.GetSRVs());
isRes |= isSamplerGlobal(ValCtx.DxilMod.GetSamplers());
isInternalGV |= isRes;
// Allow special dx.ishelper for library target
if (GV.getName().compare(DXIL::kDxIsHelperGlobalName) == 0) {
Type *Ty = GV.getType()->getPointerElementType();
if (Ty->isIntegerTy() && Ty->getScalarSizeInBits() == 32) {
isInternalGV = true;
}
}
}
if (!isInternalGV) {
if (!GV.user_empty()) {
bool hasInstructionUser = false;
for (User *U : GV.users()) {
if (isa<Instruction>(U)) {
hasInstructionUser = true;
break;
}
}
      // An external GV should not have instruction users.
if (hasInstructionUser) {
ValCtx.EmitGlobalVariableFormatError(
&GV, ValidationRule::DeclNotUsedExternal, {GV.getName()});
}
}
// Must have metadata description for each variable.
} else {
// Internal GV must have user.
if (GV.user_empty()) {
ValCtx.EmitGlobalVariableFormatError(
&GV, ValidationRule::DeclUsedInternal, {GV.getName()});
}
// Validate type for internal globals.
if (dxilutil::IsStaticGlobal(&GV) || dxilutil::IsSharedMemoryGlobal(&GV)) {
Type *Ty = GV.getType()->getPointerElementType();
ValidateType(Ty, ValCtx);
}
}
}
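// Collects stores reachable from V through constant-index GEPs; these
// fixed-address TGSM stores are candidates for race-condition checks.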
static void CollectFixAddressAccess(Value *V,
std::vector<StoreInst *> &fixAddrTGSMList) {
for (User *U : V->users()) {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
if (isa<ConstantExpr>(GEP) || GEP->hasAllConstantIndices()) {
CollectFixAddressAccess(GEP, fixAddrTGSMList);
}
} else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
fixAddrTGSMList.emplace_back(SI);
}
}
}
static bool IsDivergent(Value *V) {
// TODO: return correct result.
return false;
}
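// Reports a race condition for stores of divergent values to fixed TGSM
// addresses that execute unconditionally (post-dominate the entry block).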
static void ValidateTGSMRaceCondition(std::vector<StoreInst *> &fixAddrTGSMList,
ValidationContext &ValCtx) {
std::unordered_set<Function *> fixAddrTGSMFuncSet;
for (StoreInst *I : fixAddrTGSMList) {
BasicBlock *BB = I->getParent();
fixAddrTGSMFuncSet.insert(BB->getParent());
}
for (auto &F : ValCtx.DxilMod.GetModule()->functions()) {
if (F.isDeclaration() || !fixAddrTGSMFuncSet.count(&F))
continue;
PostDominatorTree PDT;
PDT.runOnFunction(F);
BasicBlock *Entry = &F.getEntryBlock();
for (StoreInst *SI : fixAddrTGSMList) {
BasicBlock *BB = SI->getParent();
if (BB->getParent() == &F) {
if (PDT.dominates(BB, Entry)) {
if (IsDivergent(SI->getValueOperand()))
ValCtx.EmitInstrError(SI, ValidationRule::InstrTGSMRaceCond);
}
}
}
}
}
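// Validates all module globals and enforces groupshared (TGSM) rules: the
// shader kinds that may use TGSM, the total size limit, and race conditions
// on fixed addresses.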
static void ValidateGlobalVariables(ValidationContext &ValCtx) {
DxilModule &M = ValCtx.DxilMod;
const ShaderModel *pSM = ValCtx.DxilMod.GetShaderModel();
bool TGSMAllowed = pSM->IsCS() || pSM->IsAS() || pSM->IsMS() || pSM->IsLib();
unsigned TGSMSize = 0;
std::vector<StoreInst *> fixAddrTGSMList;
const DataLayout &DL = M.GetModule()->getDataLayout();
for (GlobalVariable &GV : M.GetModule()->globals()) {
ValidateGlobalVariable(GV, ValCtx);
if (GV.getType()->getAddressSpace() == DXIL::kTGSMAddrSpace) {
if (!TGSMAllowed)
ValCtx.EmitGlobalVariableFormatError(
&GV, ValidationRule::SmTGSMUnsupported,
{std::string("in Shader Model ") + M.GetShaderModel()->GetName()});
// Lib targets need to check the usage to know if it's allowed
if (pSM->IsLib()) {
for (User *U : GV.users()) {
if (Instruction *I = dyn_cast<Instruction>(U)) {
llvm::Function *F = I->getParent()->getParent();
if (M.HasDxilEntryProps(F)) {
DxilFunctionProps &props = M.GetDxilEntryProps(F).props;
if (!props.IsCS() && !props.IsAS() && !props.IsMS() &&
!props.IsNode()) {
ValCtx.EmitInstrFormatError(I,
ValidationRule::SmTGSMUnsupported,
{"from non-compute entry points"});
}
}
}
}
}
TGSMSize += DL.getTypeAllocSize(GV.getType()->getElementType());
CollectFixAddressAccess(&GV, fixAddrTGSMList);
}
}
ValidationRule Rule = ValidationRule::SmMaxTGSMSize;
unsigned MaxSize = DXIL::kMaxTGSMSize;
if (M.GetShaderModel()->IsMS()) {
Rule = ValidationRule::SmMaxMSSMSize;
MaxSize = DXIL::kMaxMSSMSize;
}
if (TGSMSize > MaxSize) {
    Module::global_iterator GI = M.GetModule()->global_end();
    // Walk backwards to the last TGSM global to report the error on; avoid
    // dereferencing the end iterator.
    GlobalVariable *GV = nullptr;
do {
GI--;
GV = &*GI;
if (GV->getType()->getAddressSpace() == hlsl::DXIL::kTGSMAddrSpace)
break;
} while (GI != M.GetModule()->global_begin());
ValCtx.EmitGlobalVariableFormatError(
GV, Rule, {std::to_string(TGSMSize), std::to_string(MaxSize)});
}
if (!fixAddrTGSMList.empty()) {
ValidateTGSMRaceCondition(fixAddrTGSMList, ValCtx);
}
}
static void ValidateValidatorVersion(ValidationContext &ValCtx) {
Module *pModule = &ValCtx.M;
NamedMDNode *pNode = pModule->getNamedMetadata("dx.valver");
if (pNode == nullptr) {
return;
}
if (pNode->getNumOperands() == 1) {
MDTuple *pVerValues = dyn_cast<MDTuple>(pNode->getOperand(0));
if (pVerValues != nullptr && pVerValues->getNumOperands() == 2) {
uint64_t majorVer, minorVer;
if (GetNodeOperandAsInt(ValCtx, pVerValues, 0, &majorVer) &&
GetNodeOperandAsInt(ValCtx, pVerValues, 1, &minorVer)) {
unsigned curMajor, curMinor;
GetValidationVersion(&curMajor, &curMinor);
// This will need to be updated as major/minor versions evolve,
// depending on the degree of compat across versions.
if (majorVer == curMajor && minorVer <= curMinor) {
return;
} else {
ValCtx.EmitFormatError(
ValidationRule::MetaVersionSupported,
{"Validator", std::to_string(majorVer), std::to_string(minorVer),
std::to_string(curMajor), std::to_string(curMinor)});
return;
}
}
}
}
ValCtx.EmitError(ValidationRule::MetaWellFormed);
}
static void ValidateDxilVersion(ValidationContext &ValCtx) {
Module *pModule = &ValCtx.M;
NamedMDNode *pNode = pModule->getNamedMetadata("dx.version");
if (pNode == nullptr) {
return;
}
if (pNode->getNumOperands() == 1) {
MDTuple *pVerValues = dyn_cast<MDTuple>(pNode->getOperand(0));
if (pVerValues != nullptr && pVerValues->getNumOperands() == 2) {
uint64_t majorVer, minorVer;
if (GetNodeOperandAsInt(ValCtx, pVerValues, 0, &majorVer) &&
GetNodeOperandAsInt(ValCtx, pVerValues, 1, &minorVer)) {
// This will need to be updated as dxil major/minor versions evolve,
// depending on the degree of compat across versions.
if ((majorVer == DXIL::kDxilMajor && minorVer <= DXIL::kDxilMinor) &&
(majorVer == ValCtx.m_DxilMajor &&
minorVer == ValCtx.m_DxilMinor)) {
return;
} else {
ValCtx.EmitFormatError(ValidationRule::MetaVersionSupported,
{"Dxil", std::to_string(majorVer),
std::to_string(minorVer),
std::to_string(DXIL::kDxilMajor),
std::to_string(DXIL::kDxilMinor)});
return;
}
}
}
}
// ValCtx.EmitMetaError(pNode, ValidationRule::MetaWellFormed);
ValCtx.EmitError(ValidationRule::MetaWellFormed);
}
static void ValidateTypeAnnotation(ValidationContext &ValCtx) {
if (ValCtx.m_DxilMajor == 1 && ValCtx.m_DxilMinor >= 2) {
Module *pModule = &ValCtx.M;
NamedMDNode *TA = pModule->getNamedMetadata("dx.typeAnnotations");
if (TA == nullptr)
return;
for (unsigned i = 0, end = TA->getNumOperands(); i < end; ++i) {
MDTuple *TANode = dyn_cast<MDTuple>(TA->getOperand(i));
if (TANode->getNumOperands() < 3) {
ValCtx.EmitMetaError(TANode, ValidationRule::MetaWellFormed);
return;
}
ConstantInt *tag = mdconst::extract<ConstantInt>(TANode->getOperand(0));
uint64_t tagValue = tag->getZExtValue();
if (tagValue != DxilMDHelper::kDxilTypeSystemStructTag &&
tagValue != DxilMDHelper::kDxilTypeSystemFunctionTag) {
ValCtx.EmitMetaError(TANode, ValidationRule::MetaWellFormed);
return;
}
}
}
}
static void ValidateBitcode(ValidationContext &ValCtx) {
std::string diagStr;
raw_string_ostream diagStream(diagStr);
if (llvm::verifyModule(ValCtx.M, &diagStream)) {
ValCtx.EmitError(ValidationRule::BitcodeValid);
dxilutil::EmitErrorOnContext(ValCtx.M.getContext(), diagStream.str());
}
}
static void ValidateWaveSize(ValidationContext &ValCtx,
const hlsl::ShaderModel *SM, Module *pModule) {
  // Only perform this validation for compute shaders and libraries.
if (!(SM->IsCS() || SM->IsLib()))
return;
NamedMDNode *EPs = pModule->getNamedMetadata("dx.entryPoints");
if (!EPs)
return;
for (unsigned i = 0, end = EPs->getNumOperands(); i < end; ++i) {
    MDTuple *EPNodeRef = dyn_cast<MDTuple>(EPs->getOperand(i));
    if (!EPNodeRef) {
      ValCtx.EmitError(ValidationRule::MetaWellFormed);
      return;
    }
    if (EPNodeRef->getNumOperands() < 5) {
ValCtx.EmitMetaError(EPNodeRef, ValidationRule::MetaWellFormed);
return;
}
    // Access the final operand, which references the node that stores the
    // entry point properties.
const llvm::MDOperand &mOp =
EPNodeRef->getOperand(EPNodeRef->getNumOperands() - 1);
// the final operand to the entry points tuple should be a tuple.
if (mOp == nullptr || (mOp.get())->getMetadataID() != Metadata::MDTupleKind)
continue;
// get access to the node that stores entry properties
MDTuple *EPropNode = dyn_cast<MDTuple>(
EPNodeRef->getOperand(EPNodeRef->getNumOperands() - 1));
// find any incompatible tags inside the entry properties
// increment j by 2 to only analyze tags, not values
bool foundTag = false;
for (unsigned j = 0, end2 = EPropNode->getNumOperands(); j < end2; j += 2) {
const MDOperand &propertyTagOp = EPropNode->getOperand(j);
// note, we are only looking for tags, which will be a constant
// integer
DXASSERT(!(propertyTagOp == nullptr ||
(propertyTagOp.get())->getMetadataID() !=
Metadata::ConstantAsMetadataKind),
"tag operand should be a constant integer.");
ConstantInt *tag = mdconst::extract<ConstantInt>(propertyTagOp);
uint64_t tagValue = tag->getZExtValue();
      // The ranged wave size tag requires SM 6.8+; on SM 6.6 and 6.7 only the
      // legacy wave size is supported, so fail if we find the ranged tag
      // there.
if (tagValue == DxilMDHelper::kDxilRangedWaveSizeTag) {
// if this tag is already present in the
// current entry point, emit an error
if (foundTag) {
ValCtx.EmitFormatError(ValidationRule::SmWaveSizeTagDuplicate, {});
return;
}
foundTag = true;
if (SM->IsSM66Plus() && !SM->IsSM68Plus()) {
ValCtx.EmitFormatError(ValidationRule::SmWaveSizeRangeNeedsSM68Plus,
{});
return;
}
// get the metadata that contains the
// parameters to the wavesize attribute
        MDTuple *WaveTuple = dyn_cast<MDTuple>(EPropNode->getOperand(j + 1));
        if (!WaveTuple || WaveTuple->getNumOperands() != 3) {
ValCtx.EmitFormatError(
ValidationRule::SmWaveSizeRangeExpectsThreeParams, {});
return;
}
for (int k = 0; k < 3; k++) {
const MDOperand ¶m = WaveTuple->getOperand(k);
if (param->getMetadataID() != Metadata::ConstantAsMetadataKind) {
ValCtx.EmitFormatError(
ValidationRule::SmWaveSizeNeedsConstantOperands, {});
return;
}
}
} else if (tagValue == DxilMDHelper::kDxilWaveSizeTag) {
// if this tag is already present in the
// current entry point, emit an error
if (foundTag) {
ValCtx.EmitFormatError(ValidationRule::SmWaveSizeTagDuplicate, {});
return;
}
foundTag = true;
        MDTuple *WaveTuple = dyn_cast<MDTuple>(EPropNode->getOperand(j + 1));
        if (!WaveTuple || WaveTuple->getNumOperands() != 1) {
ValCtx.EmitFormatError(ValidationRule::SmWaveSizeExpectsOneParam, {});
return;
}
const MDOperand ¶m = WaveTuple->getOperand(0);
if (param->getMetadataID() != Metadata::ConstantAsMetadataKind) {
ValCtx.EmitFormatError(
ValidationRule::SmWaveSizeNeedsConstantOperands, {});
return;
}
// if the shader model is anything but 6.6 or 6.7, then we do not
// expect to encounter the legacy wave size tag.
if (!(SM->IsSM66Plus() && !SM->IsSM68Plus())) {
ValCtx.EmitFormatError(ValidationRule::SmWaveSizeNeedsSM66or67, {});
return;
}
}
}
}
}
static void ValidateMetadata(ValidationContext &ValCtx) {
ValidateValidatorVersion(ValCtx);
ValidateDxilVersion(ValCtx);
Module *pModule = &ValCtx.M;
const std::string &target = pModule->getTargetTriple();
if (target != "dxil-ms-dx") {
ValCtx.EmitFormatError(ValidationRule::MetaTarget, {target});
}
// The llvm.dbg.(cu/contents/defines/mainFileName/arg) named metadata nodes
// are only available in debug modules, not in the validated ones.
// llvm.bitsets is also disallowed.
//
// These are verified in lib/IR/Verifier.cpp.
StringMap<bool> llvmNamedMeta;
llvmNamedMeta["llvm.ident"];
llvmNamedMeta["llvm.module.flags"];
for (auto &NamedMetaNode : pModule->named_metadata()) {
if (!DxilModule::IsKnownNamedMetaData(NamedMetaNode)) {
StringRef name = NamedMetaNode.getName();
if (!name.startswith_lower("llvm.")) {
ValCtx.EmitFormatError(ValidationRule::MetaKnown, {name.str()});
} else {
if (llvmNamedMeta.count(name) == 0) {
ValCtx.EmitFormatError(ValidationRule::MetaKnown, {name.str()});
}
}
}
}
const hlsl::ShaderModel *SM = ValCtx.DxilMod.GetShaderModel();
  // Validate that wave size tags don't appear outside their expected shader
  // models, and that at most one such tag exists per entry point.
ValidateWaveSize(ValCtx, SM, pModule);
if (!SM->IsValidForDxil()) {
ValCtx.EmitFormatError(ValidationRule::SmName,
{ValCtx.DxilMod.GetShaderModel()->GetName()});
}
if (SM->GetMajor() == 6) {
// Make sure DxilVersion matches the shader model.
unsigned SMDxilMajor, SMDxilMinor;
SM->GetDxilVersion(SMDxilMajor, SMDxilMinor);
if (ValCtx.m_DxilMajor != SMDxilMajor ||
ValCtx.m_DxilMinor != SMDxilMinor) {
ValCtx.EmitFormatError(
ValidationRule::SmDxilVersion,
{std::to_string(SMDxilMajor), std::to_string(SMDxilMinor)});
}
}
ValidateTypeAnnotation(ValCtx);
}
static void ValidateResourceOverlap(
hlsl::DxilResourceBase &res,
SpacesAllocator<unsigned, DxilResourceBase> &spaceAllocator,
ValidationContext &ValCtx) {
unsigned base = res.GetLowerBound();
if (ValCtx.isLibProfile && !res.IsAllocated()) {
// Skip unallocated resource for library.
return;
}
unsigned size = res.GetRangeSize();
unsigned space = res.GetSpaceID();
auto &allocator = spaceAllocator.Get(space);
unsigned end = base + size - 1;
  // Unbounded range: the end computation wrapped around, so clamp end to the
  // (unbounded) range size.
if (end < base)
end = size;
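  // Insert returns the previously placed resource when [base, end] overlaps
  // an existing range in this register space, and null when placement
  // succeeds.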
const DxilResourceBase *conflictRes = allocator.Insert(&res, base, end);
if (conflictRes) {
ValCtx.EmitFormatError(
ValidationRule::SmResourceRangeOverlap,
{ValCtx.GetResourceName(&res), std::to_string(base),
std::to_string(size), std::to_string(conflictRes->GetLowerBound()),
std::to_string(conflictRes->GetRangeSize()), std::to_string(space)});
}
}
static void ValidateResource(hlsl::DxilResource &res,
ValidationContext &ValCtx) {
switch (res.GetKind()) {
case DXIL::ResourceKind::RawBuffer:
case DXIL::ResourceKind::TypedBuffer:
case DXIL::ResourceKind::TBuffer:
case DXIL::ResourceKind::StructuredBuffer:
case DXIL::ResourceKind::Texture1D:
case DXIL::ResourceKind::Texture1DArray:
case DXIL::ResourceKind::Texture2D:
case DXIL::ResourceKind::Texture2DArray:
case DXIL::ResourceKind::Texture3D:
case DXIL::ResourceKind::TextureCube:
case DXIL::ResourceKind::TextureCubeArray:
if (res.GetSampleCount() > 0) {
ValCtx.EmitResourceError(&res, ValidationRule::SmSampleCountOnlyOn2DMS);
}
break;
case DXIL::ResourceKind::Texture2DMS:
case DXIL::ResourceKind::Texture2DMSArray:
break;
case DXIL::ResourceKind::RTAccelerationStructure:
// TODO: check profile.
break;
case DXIL::ResourceKind::FeedbackTexture2D:
case DXIL::ResourceKind::FeedbackTexture2DArray:
if (res.GetSamplerFeedbackType() >= DXIL::SamplerFeedbackType::LastEntry)
ValCtx.EmitResourceError(&res,
ValidationRule::SmInvalidSamplerFeedbackType);
break;
default:
ValCtx.EmitResourceError(&res, ValidationRule::SmInvalidResourceKind);
break;
}
switch (res.GetCompType().GetKind()) {
case DXIL::ComponentType::F32:
case DXIL::ComponentType::SNormF32:
case DXIL::ComponentType::UNormF32:
case DXIL::ComponentType::F64:
case DXIL::ComponentType::I32:
case DXIL::ComponentType::I64:
case DXIL::ComponentType::U32:
case DXIL::ComponentType::U64:
case DXIL::ComponentType::F16:
case DXIL::ComponentType::I16:
case DXIL::ComponentType::U16:
break;
default:
if (!res.IsStructuredBuffer() && !res.IsRawBuffer() &&
!res.IsFeedbackTexture())
ValCtx.EmitResourceError(&res, ValidationRule::SmInvalidResourceCompType);
break;
}
if (res.IsStructuredBuffer()) {
unsigned stride = res.GetElementStride();
bool alignedTo4Bytes = (stride & 3) == 0;
if (!alignedTo4Bytes && ValCtx.M.GetDxilModule().GetUseMinPrecision()) {
ValCtx.EmitResourceFormatError(
&res, ValidationRule::MetaStructBufAlignment,
{std::to_string(4), std::to_string(stride)});
}
if (stride > DXIL::kMaxStructBufferStride) {
ValCtx.EmitResourceFormatError(
&res, ValidationRule::MetaStructBufAlignmentOutOfBound,
{std::to_string(DXIL::kMaxStructBufferStride),
std::to_string(stride)});
}
}
if (res.IsAnyTexture() || res.IsTypedBuffer()) {
Type *RetTy = res.GetRetType();
unsigned size =
ValCtx.DxilMod.GetModule()->getDataLayout().getTypeAllocSize(RetTy);
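    // Typed buffers and textures are limited to four 32-bit components
    // (16 bytes) per element.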
if (size > 4 * 4) {
ValCtx.EmitResourceError(&res, ValidationRule::MetaTextureType);
}
}
}
static void CollectCBufferRanges(
DxilStructAnnotation *annotation,
SpanAllocator<unsigned, DxilFieldAnnotation> &constAllocator, unsigned base,
DxilTypeSystem &typeSys, StringRef cbName, ValidationContext &ValCtx) {
DXASSERT(((base + 15) & ~(0xf)) == base,
"otherwise, base for struct is not aligned");
unsigned cbSize = annotation->GetCBufferSize();
const StructType *ST = annotation->GetStructType();
for (int i = annotation->GetNumFields() - 1; i >= 0; i--) {
DxilFieldAnnotation &fieldAnnotation = annotation->GetFieldAnnotation(i);
Type *EltTy = ST->getElementType(i);
unsigned offset = fieldAnnotation.GetCBufferOffset();
unsigned EltSize = dxilutil::GetLegacyCBufferFieldElementSize(
fieldAnnotation, EltTy, typeSys);
bool bOutOfBound = false;
if (!EltTy->isAggregateType()) {
bOutOfBound = (offset + EltSize) > cbSize;
if (!bOutOfBound) {
if (constAllocator.Insert(&fieldAnnotation, base + offset,
base + offset + EltSize - 1)) {
ValCtx.EmitFormatError(ValidationRule::SmCBufferOffsetOverlap,
{cbName, std::to_string(base + offset)});
}
}
} else if (isa<ArrayType>(EltTy)) {
if (((offset + 15) & ~(0xf)) != offset) {
ValCtx.EmitFormatError(ValidationRule::SmCBufferArrayOffsetAlignment,
{cbName, std::to_string(offset)});
continue;
}
unsigned arrayCount = 1;
while (isa<ArrayType>(EltTy)) {
arrayCount *= EltTy->getArrayNumElements();
EltTy = EltTy->getArrayElementType();
}
DxilStructAnnotation *EltAnnotation = nullptr;
if (StructType *EltST = dyn_cast<StructType>(EltTy))
EltAnnotation = typeSys.GetStructAnnotation(EltST);
unsigned alignedEltSize = ((EltSize + 15) & ~(0xf));
unsigned arraySize = ((arrayCount - 1) * alignedEltSize) + EltSize;
bOutOfBound = (offset + arraySize) > cbSize;
if (!bOutOfBound) {
        // We allocate every array element range individually because user
        // offsets may place other elements in the gaps; if gaps didn't
        // matter, we could recurse once when EltAnnotation is present and
        // bulk-allocate the rest when arrayCount > 1.
unsigned arrayBase = base + offset;
if (!EltAnnotation) {
if (EltSize > 0 &&
nullptr != constAllocator.Insert(&fieldAnnotation, arrayBase,
arrayBase + arraySize - 1)) {
ValCtx.EmitFormatError(ValidationRule::SmCBufferOffsetOverlap,
{cbName, std::to_string(arrayBase)});
}
} else {
for (unsigned idx = 0; idx < arrayCount; idx++) {
CollectCBufferRanges(EltAnnotation, constAllocator, arrayBase,
typeSys, cbName, ValCtx);
arrayBase += alignedEltSize;
}
}
}
} else {
StructType *EltST = cast<StructType>(EltTy);
unsigned structBase = base + offset;
bOutOfBound = (offset + EltSize) > cbSize;
if (!bOutOfBound) {
if (DxilStructAnnotation *EltAnnotation =
typeSys.GetStructAnnotation(EltST)) {
CollectCBufferRanges(EltAnnotation, constAllocator, structBase,
typeSys, cbName, ValCtx);
} else {
if (EltSize > 0 &&
nullptr != constAllocator.Insert(&fieldAnnotation, structBase,
structBase + EltSize - 1)) {
ValCtx.EmitFormatError(ValidationRule::SmCBufferOffsetOverlap,
{cbName, std::to_string(structBase)});
}
}
}
}
if (bOutOfBound) {
ValCtx.EmitFormatError(ValidationRule::SmCBufferElementOverflow,
{cbName, std::to_string(base + offset)});
}
}
}
static void ValidateCBuffer(DxilCBuffer &cb, ValidationContext &ValCtx) {
Type *Ty = cb.GetHLSLType()->getPointerElementType();
if (cb.GetRangeSize() != 1 || Ty->isArrayTy()) {
Ty = Ty->getArrayElementType();
}
if (!isa<StructType>(Ty)) {
ValCtx.EmitResourceError(&cb,
ValidationRule::SmCBufferTemplateTypeMustBeStruct);
return;
}
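  // kMaxCBufferSize is expressed in 16-byte vectors, so shift left by 4 to
  // convert the limit to bytes.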
if (cb.GetSize() > (DXIL::kMaxCBufferSize << 4)) {
ValCtx.EmitResourceFormatError(&cb, ValidationRule::SmCBufferSize,
{std::to_string(cb.GetSize())});
return;
}
StructType *ST = cast<StructType>(Ty);
DxilTypeSystem &typeSys = ValCtx.DxilMod.GetTypeSystem();
DxilStructAnnotation *annotation = typeSys.GetStructAnnotation(ST);
if (!annotation)
return;
// Collect constant ranges.
std::vector<std::pair<unsigned, unsigned>> constRanges;
SpanAllocator<unsigned, DxilFieldAnnotation> constAllocator(
0,
// 4096 * 16 bytes.
DXIL::kMaxCBufferSize << 4);
CollectCBufferRanges(annotation, constAllocator, 0, typeSys,
ValCtx.GetResourceName(&cb), ValCtx);
}
static void ValidateResources(ValidationContext &ValCtx) {
const vector<unique_ptr<DxilResource>> &uavs = ValCtx.DxilMod.GetUAVs();
SpacesAllocator<unsigned, DxilResourceBase> uavAllocator;
for (auto &uav : uavs) {
if (uav->IsROV()) {
if (!ValCtx.DxilMod.GetShaderModel()->IsPS() && !ValCtx.isLibProfile) {
ValCtx.EmitResourceError(uav.get(), ValidationRule::SmROVOnlyInPS);
}
}
switch (uav->GetKind()) {
case DXIL::ResourceKind::TextureCube:
case DXIL::ResourceKind::TextureCubeArray:
ValCtx.EmitResourceError(uav.get(),
ValidationRule::SmInvalidTextureKindOnUAV);
break;
default:
break;
}
if (uav->HasCounter() && !uav->IsStructuredBuffer()) {
ValCtx.EmitResourceError(uav.get(),
ValidationRule::SmCounterOnlyOnStructBuf);
}
if (uav->HasCounter() && uav->IsGloballyCoherent())
ValCtx.EmitResourceFormatError(uav.get(),
ValidationRule::MetaGlcNotOnAppendConsume,
{ValCtx.GetResourceName(uav.get())});
ValidateResource(*uav, ValCtx);
ValidateResourceOverlap(*uav, uavAllocator, ValCtx);
}
SpacesAllocator<unsigned, DxilResourceBase> srvAllocator;
const vector<unique_ptr<DxilResource>> &srvs = ValCtx.DxilMod.GetSRVs();
for (auto &srv : srvs) {
ValidateResource(*srv, ValCtx);
ValidateResourceOverlap(*srv, srvAllocator, ValCtx);
}
hlsl::DxilResourceBase *pNonDense;
if (!AreDxilResourcesDense(&ValCtx.M, &pNonDense)) {
ValCtx.EmitResourceError(pNonDense, ValidationRule::MetaDenseResIDs);
}
SpacesAllocator<unsigned, DxilResourceBase> samplerAllocator;
for (auto &sampler : ValCtx.DxilMod.GetSamplers()) {
if (sampler->GetSamplerKind() == DXIL::SamplerKind::Invalid) {
ValCtx.EmitResourceError(sampler.get(),
ValidationRule::MetaValidSamplerMode);
}
ValidateResourceOverlap(*sampler, samplerAllocator, ValCtx);
}
SpacesAllocator<unsigned, DxilResourceBase> cbufferAllocator;
for (auto &cbuffer : ValCtx.DxilMod.GetCBuffers()) {
ValidateCBuffer(*cbuffer, ValCtx);
ValidateResourceOverlap(*cbuffer, cbufferAllocator, ValCtx);
}
}
static void ValidateShaderFlags(ValidationContext &ValCtx) {
ShaderFlags calcFlags;
ValCtx.DxilMod.CollectShaderFlagsForModule(calcFlags);
// Special case for validator version prior to 1.8.
// If DXR 1.1 flag is set, but our computed flags do not have this set, then
// this is due to prior versions setting the flag based on DXR 1.1 subobjects,
// which are gone by this point. Set the flag and the rest should match.
unsigned valMajor, valMinor;
ValCtx.DxilMod.GetValidatorVersion(valMajor, valMinor);
if (DXIL::CompareVersions(valMajor, valMinor, 1, 5) >= 0 &&
DXIL::CompareVersions(valMajor, valMinor, 1, 8) < 0 &&
ValCtx.DxilMod.m_ShaderFlags.GetRaytracingTier1_1() &&
!calcFlags.GetRaytracingTier1_1()) {
calcFlags.SetRaytracingTier1_1(true);
}
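  // Compare only the flag bits that CollectShaderFlagsForModule computes;
  // bits outside the collection mask cannot be derived from module contents.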
const uint64_t mask = ShaderFlags::GetShaderFlagsRawForCollection();
uint64_t declaredFlagsRaw = ValCtx.DxilMod.m_ShaderFlags.GetShaderFlagsRaw();
uint64_t calcFlagsRaw = calcFlags.GetShaderFlagsRaw();
declaredFlagsRaw &= mask;
calcFlagsRaw &= mask;
if (declaredFlagsRaw == calcFlagsRaw) {
return;
}
ValCtx.EmitError(ValidationRule::MetaFlagsUsage);
dxilutil::EmitNoteOnContext(ValCtx.M.getContext(),
Twine("Flags declared=") +
Twine(declaredFlagsRaw) + Twine(", actual=") +
Twine(calcFlagsRaw));
}
static void ValidateSignatureElement(DxilSignatureElement &SE,
ValidationContext &ValCtx) {
DXIL::SemanticKind semanticKind = SE.GetSemantic()->GetKind();
CompType::Kind compKind = SE.GetCompType().GetKind();
DXIL::InterpolationMode Mode = SE.GetInterpolationMode()->GetKind();
StringRef Name = SE.GetName();
if (Name.size() < 1 || Name.size() > 64) {
ValCtx.EmitSignatureError(&SE, ValidationRule::MetaSemanticLen);
}
if (semanticKind > DXIL::SemanticKind::Arbitrary &&
semanticKind < DXIL::SemanticKind::Invalid) {
if (semanticKind != Semantic::GetByName(SE.GetName())->GetKind()) {
ValCtx.EmitFormatError(ValidationRule::MetaSemaKindMatchesName,
{SE.GetName(), SE.GetSemantic()->GetName()});
}
}
unsigned compWidth = 0;
bool compFloat = false;
bool compInt = false;
bool compBool = false;
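  // Decode the component kind into a width and a base-type classification
  // (float/int/bool) used by the semantic checks below.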
switch (compKind) {
case CompType::Kind::U64:
compWidth = 64;
compInt = true;
break;
case CompType::Kind::I64:
compWidth = 64;
compInt = true;
break;
// These should be translated for signatures:
// case CompType::Kind::PackedS8x32:
// case CompType::Kind::PackedU8x32:
case CompType::Kind::U32:
compWidth = 32;
compInt = true;
break;
case CompType::Kind::I32:
compWidth = 32;
compInt = true;
break;
case CompType::Kind::U16:
compWidth = 16;
compInt = true;
break;
case CompType::Kind::I16:
compWidth = 16;
compInt = true;
break;
case CompType::Kind::I1:
compWidth = 1;
compBool = true;
break;
case CompType::Kind::F64:
compWidth = 64;
compFloat = true;
break;
case CompType::Kind::F32:
compWidth = 32;
compFloat = true;
break;
case CompType::Kind::F16:
compWidth = 16;
compFloat = true;
break;
case CompType::Kind::SNormF64:
compWidth = 64;
compFloat = true;
break;
case CompType::Kind::SNormF32:
compWidth = 32;
compFloat = true;
break;
case CompType::Kind::SNormF16:
compWidth = 16;
compFloat = true;
break;
case CompType::Kind::UNormF64:
compWidth = 64;
compFloat = true;
break;
case CompType::Kind::UNormF32:
compWidth = 32;
compFloat = true;
break;
case CompType::Kind::UNormF16:
compWidth = 16;
compFloat = true;
break;
case CompType::Kind::Invalid:
default:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureCompType,
{SE.GetName()});
break;
}
if (compInt || compBool) {
switch (Mode) {
case DXIL::InterpolationMode::Linear:
case DXIL::InterpolationMode::LinearCentroid:
case DXIL::InterpolationMode::LinearNoperspective:
case DXIL::InterpolationMode::LinearNoperspectiveCentroid:
case DXIL::InterpolationMode::LinearSample:
case DXIL::InterpolationMode::LinearNoperspectiveSample: {
ValCtx.EmitFormatError(ValidationRule::MetaIntegerInterpMode,
{SE.GetName()});
} break;
default:
break;
}
}
// Elements that should not appear in the Dxil signature:
bool bAllowedInSig = true;
bool bShouldBeAllocated = true;
switch (SE.GetInterpretation()) {
case DXIL::SemanticInterpretationKind::NA:
case DXIL::SemanticInterpretationKind::NotInSig:
case DXIL::SemanticInterpretationKind::Invalid:
bAllowedInSig = false;
LLVM_FALLTHROUGH;
case DXIL::SemanticInterpretationKind::NotPacked:
case DXIL::SemanticInterpretationKind::Shadow:
bShouldBeAllocated = false;
break;
default:
break;
}
const char *inputOutput = nullptr;
if (SE.IsInput())
inputOutput = "Input";
else if (SE.IsOutput())
inputOutput = "Output";
else
inputOutput = "PatchConstant";
if (!bAllowedInSig) {
ValCtx.EmitFormatError(ValidationRule::SmSemantic,
{SE.GetName(),
ValCtx.DxilMod.GetShaderModel()->GetKindName(),
inputOutput});
} else if (bShouldBeAllocated && !SE.IsAllocated()) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticShouldBeAllocated,
{inputOutput, SE.GetName()});
} else if (!bShouldBeAllocated && SE.IsAllocated()) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticShouldNotBeAllocated,
{inputOutput, SE.GetName()});
}
bool bIsClipCull = false;
bool bIsTessfactor = false;
bool bIsBarycentric = false;
switch (semanticKind) {
case DXIL::SemanticKind::Depth:
case DXIL::SemanticKind::DepthGreaterEqual:
case DXIL::SemanticKind::DepthLessEqual:
if (!compFloat || compWidth > 32 || SE.GetCols() != 1) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "float"});
}
break;
case DXIL::SemanticKind::Coverage:
DXASSERT(!SE.IsInput() || !bAllowedInSig,
"else internal inconsistency between semantic interpretation "
"table and validation code");
LLVM_FALLTHROUGH;
case DXIL::SemanticKind::InnerCoverage:
case DXIL::SemanticKind::OutputControlPointID:
if (compKind != CompType::Kind::U32 || SE.GetCols() != 1) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "uint"});
}
break;
case DXIL::SemanticKind::Position:
if (!compFloat || compWidth > 32 || SE.GetCols() != 4) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "float4"});
}
break;
case DXIL::SemanticKind::Target:
if (compWidth > 32) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "float/int/uint"});
}
break;
case DXIL::SemanticKind::ClipDistance:
case DXIL::SemanticKind::CullDistance:
bIsClipCull = true;
if (!compFloat || compWidth > 32) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "float"});
}
// NOTE: clip cull distance size is checked at ValidateSignature.
break;
case DXIL::SemanticKind::IsFrontFace: {
if (!(compInt && compWidth == 32) || SE.GetCols() != 1) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "uint"});
}
} break;
case DXIL::SemanticKind::RenderTargetArrayIndex:
case DXIL::SemanticKind::ViewPortArrayIndex:
case DXIL::SemanticKind::VertexID:
case DXIL::SemanticKind::PrimitiveID:
case DXIL::SemanticKind::InstanceID:
case DXIL::SemanticKind::GSInstanceID:
case DXIL::SemanticKind::SampleIndex:
case DXIL::SemanticKind::StencilRef:
case DXIL::SemanticKind::ShadingRate:
if ((compKind != CompType::Kind::U32 && compKind != CompType::Kind::U16) ||
SE.GetCols() != 1) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "uint"});
}
break;
case DXIL::SemanticKind::CullPrimitive: {
if (!(compBool && compWidth == 1) || SE.GetCols() != 1) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "bool"});
}
} break;
case DXIL::SemanticKind::TessFactor:
case DXIL::SemanticKind::InsideTessFactor:
// NOTE: the size check is at CheckPatchConstantSemantic.
bIsTessfactor = true;
if (!compFloat || compWidth > 32) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "float"});
}
break;
case DXIL::SemanticKind::Arbitrary:
break;
case DXIL::SemanticKind::DomainLocation:
case DXIL::SemanticKind::Invalid:
DXASSERT(!bAllowedInSig, "else internal inconsistency between semantic "
"interpretation table and validation code");
break;
case DXIL::SemanticKind::Barycentrics:
bIsBarycentric = true;
if (!compFloat || compWidth > 32) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticCompType,
{SE.GetSemantic()->GetName(), "float"});
}
if (Mode != InterpolationMode::Kind::Linear &&
Mode != InterpolationMode::Kind::LinearCentroid &&
Mode != InterpolationMode::Kind::LinearNoperspective &&
Mode != InterpolationMode::Kind::LinearNoperspectiveCentroid &&
Mode != InterpolationMode::Kind::LinearNoperspectiveSample &&
Mode != InterpolationMode::Kind::LinearSample) {
ValCtx.EmitSignatureError(&SE,
ValidationRule::MetaBarycentricsInterpolation);
}
if (SE.GetCols() != 3) {
ValCtx.EmitSignatureError(&SE, ValidationRule::MetaBarycentricsFloat3);
}
break;
default:
ValCtx.EmitSignatureError(&SE, ValidationRule::MetaSemaKindValid);
break;
}
if (ValCtx.DxilMod.GetShaderModel()->IsGS() && SE.IsOutput()) {
if (SE.GetOutputStream() >= DXIL::kNumOutputStreams) {
ValCtx.EmitFormatError(ValidationRule::SmStreamIndexRange,
{std::to_string(SE.GetOutputStream()),
std::to_string(DXIL::kNumOutputStreams - 1)});
}
} else {
if (SE.GetOutputStream() > 0) {
ValCtx.EmitFormatError(ValidationRule::SmStreamIndexRange,
{std::to_string(SE.GetOutputStream()), "0"});
}
}
if (ValCtx.DxilMod.GetShaderModel()->IsGS()) {
if (SE.GetOutputStream() != 0) {
if (ValCtx.DxilMod.GetStreamPrimitiveTopology() !=
DXIL::PrimitiveTopology::PointList) {
ValCtx.EmitSignatureError(&SE,
ValidationRule::SmMultiStreamMustBePoint);
}
}
}
if (semanticKind == DXIL::SemanticKind::Target) {
// Verify packed row == semantic index
unsigned row = SE.GetStartRow();
for (unsigned i : SE.GetSemanticIndexVec()) {
if (row != i) {
ValCtx.EmitSignatureError(&SE,
ValidationRule::SmPSTargetIndexMatchesRow);
}
++row;
}
// Verify packed col is 0
if (SE.GetStartCol() != 0) {
ValCtx.EmitSignatureError(&SE, ValidationRule::SmPSTargetCol0);
}
// Verify max row used < 8
if (SE.GetStartRow() + SE.GetRows() > 8) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticIndexMax,
{"SV_Target", "7"});
}
} else if (bAllowedInSig && semanticKind != DXIL::SemanticKind::Arbitrary) {
if (bIsBarycentric) {
if (SE.GetSemanticStartIndex() > 1) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticIndexMax,
{SE.GetSemantic()->GetName(), "1"});
}
} else if (!bIsClipCull && SE.GetSemanticStartIndex() > 0) {
ValCtx.EmitFormatError(ValidationRule::MetaSemanticIndexMax,
{SE.GetSemantic()->GetName(), "0"});
}
    // Maximum rows is 1 for system values other than Target, with the
    // exceptions of tess factors, which are validated in
    // CheckPatchConstantSemantic, and ClipDistance/CullDistance, which have
    // other custom constraints.
if (!bIsTessfactor && !bIsClipCull && SE.GetRows() > 1) {
ValCtx.EmitSignatureError(&SE, ValidationRule::MetaSystemValueRows);
}
}
if (SE.GetCols() + (SE.IsAllocated() ? SE.GetStartCol() : 0) > 4) {
unsigned size = (SE.GetRows() - 1) * 4 + SE.GetCols();
ValCtx.EmitFormatError(ValidationRule::MetaSignatureOutOfRange,
{SE.GetName(), std::to_string(SE.GetStartRow()),
std::to_string(SE.GetStartCol()),
std::to_string(size)});
}
if (!SE.GetInterpolationMode()->IsValid()) {
ValCtx.EmitSignatureError(&SE, ValidationRule::MetaInterpModeValid);
}
}
static void ValidateSignatureOverlap(DxilSignatureElement &E,
unsigned maxScalars,
DxilSignatureAllocator &allocator,
ValidationContext &ValCtx) {
// Skip entries that are not or should not be allocated. Validation occurs in
// ValidateSignatureElement.
if (!E.IsAllocated())
return;
switch (E.GetInterpretation()) {
case DXIL::SemanticInterpretationKind::NA:
case DXIL::SemanticInterpretationKind::NotInSig:
case DXIL::SemanticInterpretationKind::Invalid:
case DXIL::SemanticInterpretationKind::NotPacked:
case DXIL::SemanticInterpretationKind::Shadow:
return;
default:
break;
}
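  // Replay the element through the packing allocator: check for a row-level
  // conflict first, then refine to a column-level conflict where applicable.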
DxilPackElement PE(&E, allocator.UseMinPrecision());
DxilSignatureAllocator::ConflictType conflict =
allocator.DetectRowConflict(&PE, E.GetStartRow());
if (conflict == DxilSignatureAllocator::kNoConflict ||
conflict == DxilSignatureAllocator::kInsufficientFreeComponents)
conflict =
allocator.DetectColConflict(&PE, E.GetStartRow(), E.GetStartCol());
switch (conflict) {
case DxilSignatureAllocator::kNoConflict:
allocator.PlaceElement(&PE, E.GetStartRow(), E.GetStartCol());
break;
case DxilSignatureAllocator::kConflictsWithIndexed:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureIndexConflict,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
case DxilSignatureAllocator::kConflictsWithIndexedTessFactor:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureIndexConflict,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
case DxilSignatureAllocator::kConflictsWithInterpolationMode:
ValCtx.EmitFormatError(ValidationRule::MetaInterpModeInOneRow,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
case DxilSignatureAllocator::kInsufficientFreeComponents:
DXASSERT(false, "otherwise, conflict not translated");
break;
case DxilSignatureAllocator::kOverlapElement:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureOverlap,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
case DxilSignatureAllocator::kIllegalComponentOrder:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureIllegalComponentOrder,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
case DxilSignatureAllocator::kConflictFit:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureOutOfRange,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
case DxilSignatureAllocator::kConflictDataWidth:
ValCtx.EmitFormatError(ValidationRule::MetaSignatureDataWidth,
{E.GetName(), std::to_string(E.GetStartRow()),
std::to_string(E.GetStartCol()),
std::to_string(E.GetRows()),
std::to_string(E.GetCols())});
break;
default:
DXASSERT(
false,
"otherwise, unrecognized conflict type from DxilSignatureAllocator");
}
}
static void ValidateSignature(ValidationContext &ValCtx, const DxilSignature &S,
EntryStatus &Status, unsigned maxScalars) {
DxilSignatureAllocator allocator[DXIL::kNumOutputStreams] = {
{32, ValCtx.DxilMod.GetUseMinPrecision()},
{32, ValCtx.DxilMod.GetUseMinPrecision()},
{32, ValCtx.DxilMod.GetUseMinPrecision()},
{32, ValCtx.DxilMod.GetUseMinPrecision()}};
unordered_set<unsigned> semanticUsageSet[DXIL::kNumOutputStreams];
StringMap<unordered_set<unsigned>> semanticIndexMap[DXIL::kNumOutputStreams];
unordered_set<unsigned> clipcullRowSet[DXIL::kNumOutputStreams];
unsigned clipcullComponents[DXIL::kNumOutputStreams] = {0, 0, 0, 0};
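  // Packing state is tracked per output stream: GS may write up to four
  // streams, while all other stages use stream 0 only.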
bool isOutput = S.IsOutput();
unsigned TargetMask = 0;
DXIL::SemanticKind DepthKind = DXIL::SemanticKind::Invalid;
const InterpolationMode *prevBaryInterpMode = nullptr;
unsigned numBarycentrics = 0;
for (auto &E : S.GetElements()) {
DXIL::SemanticKind semanticKind = E->GetSemantic()->GetKind();
ValidateSignatureElement(*E, ValCtx);
// Avoid OOB indexing on streamId.
unsigned streamId = E->GetOutputStream();
if (streamId >= DXIL::kNumOutputStreams || !isOutput ||
!ValCtx.DxilMod.GetShaderModel()->IsGS()) {
streamId = 0;
}
// Semantic index overlap check, keyed by name.
std::string nameUpper(E->GetName());
std::transform(nameUpper.begin(), nameUpper.end(), nameUpper.begin(),
::toupper);
unordered_set<unsigned> &semIdxSet = semanticIndexMap[streamId][nameUpper];
for (unsigned semIdx : E->GetSemanticIndexVec()) {
if (semIdxSet.count(semIdx) > 0) {
ValCtx.EmitFormatError(ValidationRule::MetaNoSemanticOverlap,
{E->GetName(), std::to_string(semIdx)});
return;
} else
semIdxSet.insert(semIdx);
}
// SV_Target has special rules
if (semanticKind == DXIL::SemanticKind::Target) {
// Validate target overlap
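      // Each bit of TargetMask marks a render-target row already claimed by
      // a previous SV_Target element.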
if (E->GetStartRow() + E->GetRows() <= 8) {
unsigned mask = ((1 << E->GetRows()) - 1) << E->GetStartRow();
if (TargetMask & mask) {
ValCtx.EmitFormatError(
ValidationRule::MetaNoSemanticOverlap,
{"SV_Target", std::to_string(E->GetStartRow())});
}
TargetMask = TargetMask | mask;
}
if (E->GetRows() > 1) {
ValCtx.EmitSignatureError(E.get(), ValidationRule::SmNoPSOutputIdx);
}
continue;
}
if (E->GetSemantic()->IsInvalid())
continue;
// validate system value semantic rules
switch (semanticKind) {
case DXIL::SemanticKind::Arbitrary:
break;
case DXIL::SemanticKind::ClipDistance:
case DXIL::SemanticKind::CullDistance:
// Validate max 8 components across 2 rows (registers)
for (unsigned rowIdx = 0; rowIdx < E->GetRows(); rowIdx++)
clipcullRowSet[streamId].insert(E->GetStartRow() + rowIdx);
if (clipcullRowSet[streamId].size() > 2) {
ValCtx.EmitSignatureError(E.get(), ValidationRule::MetaClipCullMaxRows);
}
clipcullComponents[streamId] += E->GetCols();
if (clipcullComponents[streamId] > 8) {
ValCtx.EmitSignatureError(E.get(),
ValidationRule::MetaClipCullMaxComponents);
}
break;
case DXIL::SemanticKind::Depth:
case DXIL::SemanticKind::DepthGreaterEqual:
case DXIL::SemanticKind::DepthLessEqual:
if (DepthKind != DXIL::SemanticKind::Invalid) {
ValCtx.EmitSignatureError(E.get(),
ValidationRule::SmPSMultipleDepthSemantic);
}
DepthKind = semanticKind;
break;
case DXIL::SemanticKind::Barycentrics: {
// There can only be up to two SV_Barycentrics
      // with different perspective interpolation modes.
if (numBarycentrics++ > 1) {
ValCtx.EmitSignatureError(
E.get(), ValidationRule::MetaBarycentricsTwoPerspectives);
break;
}
const InterpolationMode *mode = E->GetInterpolationMode();
if (prevBaryInterpMode) {
if ((mode->IsAnyNoPerspective() &&
prevBaryInterpMode->IsAnyNoPerspective()) ||
(!mode->IsAnyNoPerspective() &&
!prevBaryInterpMode->IsAnyNoPerspective())) {
ValCtx.EmitSignatureError(
E.get(), ValidationRule::MetaBarycentricsTwoPerspectives);
}
}
prevBaryInterpMode = mode;
break;
}
default:
if (semanticUsageSet[streamId].count(
static_cast<unsigned>(semanticKind)) > 0) {
ValCtx.EmitFormatError(ValidationRule::MetaDuplicateSysValue,
{E->GetSemantic()->GetName()});
}
semanticUsageSet[streamId].insert(static_cast<unsigned>(semanticKind));
break;
}
// Packed element overlap check.
ValidateSignatureOverlap(*E.get(), maxScalars, allocator[streamId], ValCtx);
if (isOutput && semanticKind == DXIL::SemanticKind::Position) {
Status.hasOutputPosition[E->GetOutputStream()] = true;
}
}
if (Status.hasViewID && S.IsInput() &&
ValCtx.DxilMod.GetShaderModel()->GetKind() == DXIL::ShaderKind::Pixel) {
// Ensure sufficient space for ViewID:
DxilSignatureAllocator::DummyElement viewID;
viewID.rows = 1;
viewID.cols = 1;
viewID.kind = DXIL::SemanticKind::Arbitrary;
viewID.interpolation = DXIL::InterpolationMode::Constant;
viewID.interpretation = DXIL::SemanticInterpretationKind::SGV;
allocator[0].PackNext(&viewID, 0, 32);
if (!viewID.IsAllocated()) {
ValCtx.EmitError(ValidationRule::SmViewIDNeedsSlot);
}
}
}
static void ValidateNoInterpModeSignature(ValidationContext &ValCtx,
const DxilSignature &S) {
for (auto &E : S.GetElements()) {
if (!E->GetInterpolationMode()->IsUndefined()) {
ValCtx.EmitSignatureError(E.get(), ValidationRule::SmNoInterpMode);
}
}
}
static void ValidateConstantInterpModeSignature(ValidationContext &ValCtx,
const DxilSignature &S) {
for (auto &E : S.GetElements()) {
if (!E->GetInterpolationMode()->IsConstant()) {
ValCtx.EmitSignatureError(E.get(), ValidationRule::SmConstantInterpMode);
}
}
}
static void ValidateEntrySignatures(ValidationContext &ValCtx,
const DxilEntryProps &entryProps,
EntryStatus &Status, Function &F) {
const DxilFunctionProps &props = entryProps.props;
const DxilEntrySignature &S = entryProps.sig;
if (props.IsRay()) {
// No signatures allowed
if (!S.InputSignature.GetElements().empty() ||
!S.OutputSignature.GetElements().empty() ||
!S.PatchConstOrPrimSignature.GetElements().empty()) {
ValCtx.EmitFnFormatError(&F, ValidationRule::SmRayShaderSignatures,
{F.getName()});
}
// Validate payload/attribute/params sizes
unsigned payloadSize = 0;
unsigned attrSize = 0;
auto itPayload = F.arg_begin();
auto itAttr = itPayload;
if (itAttr != F.arg_end())
itAttr++;
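    // By convention, the first parameter is the payload (or the parameter
    // struct for callable shaders) and the second, when present, is the hit
    // attribute struct.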
DataLayout DL(F.getParent());
switch (props.shaderKind) {
case DXIL::ShaderKind::AnyHit:
case DXIL::ShaderKind::ClosestHit:
if (itAttr != F.arg_end()) {
Type *Ty = itAttr->getType();
if (Ty->isPointerTy())
Ty = Ty->getPointerElementType();
attrSize =
(unsigned)std::min(DL.getTypeAllocSize(Ty), (uint64_t)UINT_MAX);
}
LLVM_FALLTHROUGH;
case DXIL::ShaderKind::Miss:
case DXIL::ShaderKind::Callable:
if (itPayload != F.arg_end()) {
Type *Ty = itPayload->getType();
if (Ty->isPointerTy())
Ty = Ty->getPointerElementType();
payloadSize =
(unsigned)std::min(DL.getTypeAllocSize(Ty), (uint64_t)UINT_MAX);
}
      break;
    default:
      break;
    }
if (props.ShaderProps.Ray.payloadSizeInBytes < payloadSize) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmRayShaderPayloadSize,
{F.getName(), props.IsCallable() ? "params" : "payload"});
}
if (props.ShaderProps.Ray.attributeSizeInBytes < attrSize) {
ValCtx.EmitFnFormatError(&F, ValidationRule::SmRayShaderPayloadSize,
{F.getName(), "attribute"});
}
return;
}
bool isPS = props.IsPS();
bool isVS = props.IsVS();
bool isGS = props.IsGS();
bool isCS = props.IsCS();
bool isMS = props.IsMS();
if (isPS) {
// PS output no interp mode.
ValidateNoInterpModeSignature(ValCtx, S.OutputSignature);
} else if (isVS) {
// VS input no interp mode.
ValidateNoInterpModeSignature(ValCtx, S.InputSignature);
}
if (isMS) {
// primitive output constant interp mode.
ValidateConstantInterpModeSignature(ValCtx, S.PatchConstOrPrimSignature);
} else {
// patch constant no interp mode.
ValidateNoInterpModeSignature(ValCtx, S.PatchConstOrPrimSignature);
}
unsigned maxInputScalars = DXIL::kMaxInputTotalScalars;
unsigned maxOutputScalars = 0;
unsigned maxPatchConstantScalars = 0;
switch (props.shaderKind) {
case DXIL::ShaderKind::Compute:
break;
case DXIL::ShaderKind::Vertex:
case DXIL::ShaderKind::Geometry:
case DXIL::ShaderKind::Pixel:
maxOutputScalars = DXIL::kMaxOutputTotalScalars;
break;
case DXIL::ShaderKind::Hull:
case DXIL::ShaderKind::Domain:
maxOutputScalars = DXIL::kMaxOutputTotalScalars;
maxPatchConstantScalars = DXIL::kMaxHSOutputPatchConstantTotalScalars;
break;
case DXIL::ShaderKind::Mesh:
maxOutputScalars = DXIL::kMaxOutputTotalScalars;
maxPatchConstantScalars = DXIL::kMaxOutputTotalScalars;
break;
case DXIL::ShaderKind::Amplification:
default:
break;
}
ValidateSignature(ValCtx, S.InputSignature, Status, maxInputScalars);
ValidateSignature(ValCtx, S.OutputSignature, Status, maxOutputScalars);
ValidateSignature(ValCtx, S.PatchConstOrPrimSignature, Status,
maxPatchConstantScalars);
if (isPS) {
// Gather execution information.
hlsl::PSExecutionInfo PSExec;
DxilSignatureElement *PosInterpSE = nullptr;
for (auto &E : S.InputSignature.GetElements()) {
if (E->GetKind() == DXIL::SemanticKind::SampleIndex) {
PSExec.SuperSampling = true;
continue;
}
const InterpolationMode *IM = E->GetInterpolationMode();
if (IM->IsLinearSample() || IM->IsLinearNoperspectiveSample()) {
PSExec.SuperSampling = true;
}
if (E->GetKind() == DXIL::SemanticKind::Position) {
PSExec.PositionInterpolationMode = IM;
PosInterpSE = E.get();
}
}
for (auto &E : S.OutputSignature.GetElements()) {
if (E->IsAnyDepth()) {
PSExec.OutputDepthKind = E->GetKind();
break;
}
}
if (!PSExec.SuperSampling &&
PSExec.OutputDepthKind != DXIL::SemanticKind::Invalid &&
PSExec.OutputDepthKind != DXIL::SemanticKind::Depth) {
if (PSExec.PositionInterpolationMode != nullptr) {
if (!PSExec.PositionInterpolationMode->IsUndefined() &&
!PSExec.PositionInterpolationMode
->IsLinearNoperspectiveCentroid() &&
!PSExec.PositionInterpolationMode->IsLinearNoperspectiveSample()) {
ValCtx.EmitFnFormatError(&F, ValidationRule::SmPSConsistentInterp,
{PosInterpSE->GetName()});
}
}
}
// Validate PS output semantic.
const DxilSignature &outputSig = S.OutputSignature;
for (auto &SE : outputSig.GetElements()) {
Semantic::Kind semanticKind = SE->GetSemantic()->GetKind();
switch (semanticKind) {
case Semantic::Kind::Target:
case Semantic::Kind::Coverage:
case Semantic::Kind::Depth:
case Semantic::Kind::DepthGreaterEqual:
case Semantic::Kind::DepthLessEqual:
case Semantic::Kind::StencilRef:
break;
default: {
ValCtx.EmitFnFormatError(&F, ValidationRule::SmPSOutputSemantic,
{SE->GetName()});
} break;
}
}
}
if (isGS) {
unsigned maxVertexCount = props.ShaderProps.GS.maxVertexCount;
unsigned outputScalarCount = 0;
const DxilSignature &outSig = S.OutputSignature;
for (auto &SE : outSig.GetElements()) {
outputScalarCount += SE->GetRows() * SE->GetCols();
}
unsigned totalOutputScalars = maxVertexCount * outputScalarCount;
if (totalOutputScalars > DXIL::kMaxGSOutputTotalScalars) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmGSTotalOutputVertexDataRange,
{std::to_string(maxVertexCount), std::to_string(outputScalarCount),
std::to_string(totalOutputScalars),
std::to_string(DXIL::kMaxGSOutputTotalScalars)});
}
}
if (isCS) {
if (!S.InputSignature.GetElements().empty() ||
!S.OutputSignature.GetElements().empty() ||
!S.PatchConstOrPrimSignature.GetElements().empty()) {
ValCtx.EmitFnError(&F, ValidationRule::SmCSNoSignatures);
}
}
if (isMS) {
unsigned VertexSignatureRows = S.OutputSignature.GetRowCount();
if (VertexSignatureRows > DXIL::kMaxMSVSigRows) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmMeshVSigRowCount,
{F.getName(), std::to_string(DXIL::kMaxMSVSigRows)});
}
unsigned PrimitiveSignatureRows = S.PatchConstOrPrimSignature.GetRowCount();
if (PrimitiveSignatureRows > DXIL::kMaxMSPSigRows) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmMeshPSigRowCount,
{F.getName(), std::to_string(DXIL::kMaxMSPSigRows)});
}
if (VertexSignatureRows + PrimitiveSignatureRows >
DXIL::kMaxMSTotalSigRows) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmMeshTotalSigRowCount,
{F.getName(), std::to_string(DXIL::kMaxMSTotalSigRows)});
}
const unsigned kScalarSizeForMSAttributes = 4;
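    // Attribute storage is computed with vertex and primitive counts rounded
    // up to a multiple of 32.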
#define ALIGN32(n) (((n) + 31) & ~31)
unsigned maxAlign32VertexCount =
ALIGN32(props.ShaderProps.MS.maxVertexCount);
unsigned maxAlign32PrimitiveCount =
ALIGN32(props.ShaderProps.MS.maxPrimitiveCount);
unsigned totalOutputScalars = 0;
for (auto &SE : S.OutputSignature.GetElements()) {
totalOutputScalars +=
SE->GetRows() * SE->GetCols() * maxAlign32VertexCount;
}
for (auto &SE : S.PatchConstOrPrimSignature.GetElements()) {
totalOutputScalars +=
SE->GetRows() * SE->GetCols() * maxAlign32PrimitiveCount;
}
if (totalOutputScalars * kScalarSizeForMSAttributes >
DXIL::kMaxMSOutputTotalBytes) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmMeshShaderOutputSize,
{F.getName(), std::to_string(DXIL::kMaxMSOutputTotalBytes)});
}
unsigned totalInputOutputBytes =
totalOutputScalars * kScalarSizeForMSAttributes +
props.ShaderProps.MS.payloadSizeInBytes;
if (totalInputOutputBytes > DXIL::kMaxMSInputOutputTotalBytes) {
ValCtx.EmitFnFormatError(
&F, ValidationRule::SmMeshShaderInOutSize,
{F.getName(), std::to_string(DXIL::kMaxMSInputOutputTotalBytes)});
}
}
}
static void ValidateEntrySignatures(ValidationContext &ValCtx) {
DxilModule &DM = ValCtx.DxilMod;
if (ValCtx.isLibProfile) {
for (Function &F : DM.GetModule()->functions()) {
if (DM.HasDxilEntryProps(&F)) {
DxilEntryProps &entryProps = DM.GetDxilEntryProps(&F);
EntryStatus &Status = ValCtx.GetEntryStatus(&F);
ValidateEntrySignatures(ValCtx, entryProps, Status, F);
}
}
} else {
Function *Entry = DM.GetEntryFunction();
if (!DM.HasDxilEntryProps(Entry)) {
// must have props.
ValCtx.EmitFnError(Entry, ValidationRule::MetaNoEntryPropsForEntry);
return;
}
EntryStatus &Status = ValCtx.GetEntryStatus(Entry);
DxilEntryProps &entryProps = DM.GetDxilEntryProps(Entry);
ValidateEntrySignatures(ValCtx, entryProps, Status, *Entry);
}
}
// CompatibilityChecker is used to identify incompatibilities in an entry
// function and any functions called by that entry function.
struct CompatibilityChecker {
ValidationContext &ValCtx;
Function *EntryFn;
const DxilFunctionProps &props;
DXIL::ShaderKind shaderKind;
// These masks identify the potential conflict flags based on the entry
// function's shader kind and properties when either UsesDerivatives or
// RequiresGroup flags are set in ShaderCompatInfo.
uint32_t maskForDeriv = 0;
uint32_t maskForGroup = 0;
enum class ConflictKind : uint32_t {
Stage,
ShaderModel,
DerivLaunch,
DerivThreadGroupDim,
DerivInComputeShaderModel,
RequiresGroup,
};
enum class ConflictFlags : uint32_t {
Stage = 1 << (uint32_t)ConflictKind::Stage,
ShaderModel = 1 << (uint32_t)ConflictKind::ShaderModel,
DerivLaunch = 1 << (uint32_t)ConflictKind::DerivLaunch,
DerivThreadGroupDim = 1 << (uint32_t)ConflictKind::DerivThreadGroupDim,
DerivInComputeShaderModel =
1 << (uint32_t)ConflictKind::DerivInComputeShaderModel,
RequiresGroup = 1 << (uint32_t)ConflictKind::RequiresGroup,
};
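  // ConflictFlags mirrors ConflictKind as a bitmask so that multiple
  // conflicts can be accumulated in one traversal and diagnosed together.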
CompatibilityChecker(ValidationContext &ValCtx, Function *EntryFn)
: ValCtx(ValCtx), EntryFn(EntryFn),
props(ValCtx.DxilMod.GetDxilEntryProps(EntryFn).props),
shaderKind(props.shaderKind) {
// Precompute potential incompatibilities based on shader stage, shader kind
// and entry attributes. These will turn into full conflicts if the entry
// point's shader flags indicate that they use relevant features.
if (!ValCtx.DxilMod.GetShaderModel()->IsSM66Plus() &&
(shaderKind == DXIL::ShaderKind::Mesh ||
shaderKind == DXIL::ShaderKind::Amplification ||
shaderKind == DXIL::ShaderKind::Compute)) {
maskForDeriv |=
static_cast<uint32_t>(ConflictFlags::DerivInComputeShaderModel);
} else if (shaderKind == DXIL::ShaderKind::Node) {
// Only broadcasting launch supports derivatives.
if (props.Node.LaunchType != DXIL::NodeLaunchType::Broadcasting)
maskForDeriv |= static_cast<uint32_t>(ConflictFlags::DerivLaunch);
// Thread launch node has no group.
if (props.Node.LaunchType == DXIL::NodeLaunchType::Thread)
maskForGroup |= static_cast<uint32_t>(ConflictFlags::RequiresGroup);
}
if (shaderKind == DXIL::ShaderKind::Mesh ||
shaderKind == DXIL::ShaderKind::Amplification ||
shaderKind == DXIL::ShaderKind::Compute ||
shaderKind == DXIL::ShaderKind::Node) {
      // All compute-like stages.
      // For derivative support, the thread group must be either 1D with X a
      // multiple of 4, or 2D with both X and Y multiples of 2.
if (props.numThreads[1] == 1 && props.numThreads[2] == 1) {
if ((props.numThreads[0] & 0x3) != 0)
maskForDeriv |=
static_cast<uint32_t>(ConflictFlags::DerivThreadGroupDim);
} else if ((props.numThreads[0] & 0x1) || (props.numThreads[1] & 0x1))
maskForDeriv |=
static_cast<uint32_t>(ConflictFlags::DerivThreadGroupDim);
} else {
// other stages have no group
maskForGroup |= static_cast<uint32_t>(ConflictFlags::RequiresGroup);
}
}
uint32_t
IdentifyConflict(const DxilModule::ShaderCompatInfo &compatInfo) const {
uint32_t conflictMask = 0;
// Compatibility check said this shader kind is not compatible.
if (0 == ((1 << (uint32_t)shaderKind) & compatInfo.mask))
conflictMask |= (uint32_t)ConflictFlags::Stage;
// Compatibility check said this shader model is not compatible.
if (DXIL::CompareVersions(ValCtx.DxilMod.GetShaderModel()->GetMajor(),
ValCtx.DxilMod.GetShaderModel()->GetMinor(),
compatInfo.minMajor, compatInfo.minMinor) < 0)
conflictMask |= (uint32_t)ConflictFlags::ShaderModel;
if (compatInfo.shaderFlags.GetUsesDerivatives())
conflictMask |= maskForDeriv;
if (compatInfo.shaderFlags.GetRequiresGroup())
conflictMask |= maskForGroup;
return conflictMask;
}
void Diagnose(Function *F, uint32_t conflictMask, ConflictKind conflict,
ValidationRule rule, ArrayRef<StringRef> args = {}) {
if (conflictMask & (1 << (unsigned)conflict))
ValCtx.EmitFnFormatError(F, rule, args);
}
void DiagnoseConflicts(Function *F, uint32_t conflictMask) {
// Emit a diagnostic indicating that either the entry function or a function
// called by the entry function contains a disallowed operation.
if (F == EntryFn)
ValCtx.EmitFnError(EntryFn, ValidationRule::SmIncompatibleOperation);
else
ValCtx.EmitFnError(EntryFn, ValidationRule::SmIncompatibleCallInEntry);
// Emit diagnostics for each conflict found in this function.
Diagnose(F, conflictMask, ConflictKind::Stage,
ValidationRule::SmIncompatibleStage,
{ShaderModel::GetKindName(props.shaderKind)});
Diagnose(F, conflictMask, ConflictKind::ShaderModel,
ValidationRule::SmIncompatibleShaderModel);
Diagnose(F, conflictMask, ConflictKind::DerivLaunch,
ValidationRule::SmIncompatibleDerivLaunch,
{GetLaunchTypeStr(props.Node.LaunchType)});
Diagnose(F, conflictMask, ConflictKind::DerivThreadGroupDim,
ValidationRule::SmIncompatibleThreadGroupDim,
{std::to_string(props.numThreads[0]),
std::to_string(props.numThreads[1]),
std::to_string(props.numThreads[2])});
Diagnose(F, conflictMask, ConflictKind::DerivInComputeShaderModel,
ValidationRule::SmIncompatibleDerivInComputeShaderModel);
Diagnose(F, conflictMask, ConflictKind::RequiresGroup,
ValidationRule::SmIncompatibleRequiresGroup);
}
// Visit function and all functions called by it.
// Emit diagnostics for incompatibilities found in a function when no
// functions called by that function introduced the conflict.
// In those cases, the called functions themselves will emit the diagnostic.
// Return conflict mask for this function.
uint32_t Visit(Function *F, uint32_t &remainingMask,
llvm::SmallPtrSet<Function *, 8> &visited, CallGraph &CG) {
// Recursive check looks for where a conflict is found and not present
// in functions called by the current function.
// - When a source is found, emit diagnostics and clear the conflict
// flags introduced by this function from the working mask so we don't
// report this conflict again.
// - When the remainingMask is 0, we are done.
if (remainingMask == 0)
return 0; // Nothing left to search for.
if (!visited.insert(F).second)
return 0; // Already visited.
const DxilModule::ShaderCompatInfo *compatInfo =
ValCtx.DxilMod.GetCompatInfoForFunction(F);
DXASSERT(compatInfo, "otherwise, compat info not computed in module");
if (!compatInfo)
return 0;
uint32_t maskForThisFunction = IdentifyConflict(*compatInfo);
uint32_t maskForCalls = 0;
if (CallGraphNode *CGNode = CG[F]) {
for (auto &Call : *CGNode) {
Function *called = Call.second->getFunction();
if (called->isDeclaration())
continue;
maskForCalls |= Visit(called, remainingMask, visited, CG);
if (remainingMask == 0)
return 0; // Nothing left to search for.
}
}
// Mask of incompatibilities introduced by this function.
uint32_t conflictsIntroduced =
remainingMask & maskForThisFunction & ~maskForCalls;
if (conflictsIntroduced) {
// This function introduces at least one conflict.
DiagnoseConflicts(F, conflictsIntroduced);
// Mask off diagnosed incompatibilities.
remainingMask &= ~conflictsIntroduced;
}
return maskForThisFunction;
}
void FindIncompatibleCall(const DxilModule::ShaderCompatInfo &compatInfo) {
uint32_t conflictMask = IdentifyConflict(compatInfo);
if (conflictMask == 0)
return;
CallGraph &CG = ValCtx.GetCallGraph();
llvm::SmallPtrSet<Function *, 8> visited;
Visit(EntryFn, conflictMask, visited, CG);
}
};
static void ValidateEntryCompatibility(ValidationContext &ValCtx) {
// Make sure functions called from each entry are compatible with that entry.
DxilModule &DM = ValCtx.DxilMod;
for (Function &F : DM.GetModule()->functions()) {
if (DM.HasDxilEntryProps(&F)) {
const DxilModule::ShaderCompatInfo *compatInfo =
DM.GetCompatInfoForFunction(&F);
DXASSERT(compatInfo, "otherwise, compat info not computed in module");
if (!compatInfo)
continue;
CompatibilityChecker checker(ValCtx, &F);
checker.FindIncompatibleCall(*compatInfo);
}
}
}
static void CheckPatchConstantSemantic(ValidationContext &ValCtx,
const DxilEntryProps &EntryProps,
EntryStatus &Status, Function *F) {
const DxilFunctionProps &props = EntryProps.props;
bool isHS = props.IsHS();
DXIL::TessellatorDomain domain =
isHS ? props.ShaderProps.HS.domain : props.ShaderProps.DS.domain;
const DxilSignature &patchConstantSig =
EntryProps.sig.PatchConstOrPrimSignature;
const unsigned kQuadEdgeSize = 4;
const unsigned kQuadInsideSize = 2;
const unsigned kQuadDomainLocSize = 2;
const unsigned kTriEdgeSize = 3;
const unsigned kTriInsideSize = 1;
const unsigned kTriDomainLocSize = 3;
const unsigned kIsolineEdgeSize = 2;
const unsigned kIsolineInsideSize = 0;
const unsigned kIsolineDomainLocSize = 3;
const char *domainName = "";
DXIL::SemanticKind kEdgeSemantic = DXIL::SemanticKind::TessFactor;
unsigned edgeSize = 0;
DXIL::SemanticKind kInsideSemantic = DXIL::SemanticKind::InsideTessFactor;
unsigned insideSize = 0;
Status.domainLocSize = 0;
switch (domain) {
case DXIL::TessellatorDomain::IsoLine:
domainName = "IsoLine";
edgeSize = kIsolineEdgeSize;
insideSize = kIsolineInsideSize;
Status.domainLocSize = kIsolineDomainLocSize;
break;
case DXIL::TessellatorDomain::Tri:
domainName = "Tri";
edgeSize = kTriEdgeSize;
insideSize = kTriInsideSize;
Status.domainLocSize = kTriDomainLocSize;
break;
case DXIL::TessellatorDomain::Quad:
domainName = "Quad";
edgeSize = kQuadEdgeSize;
insideSize = kQuadInsideSize;
Status.domainLocSize = kQuadDomainLocSize;
break;
default:
// Don't bother with other tests if domain is invalid
return;
}
bool bFoundEdgeSemantic = false;
bool bFoundInsideSemantic = false;
for (auto &SE : patchConstantSig.GetElements()) {
Semantic::Kind kind = SE->GetSemantic()->GetKind();
if (kind == kEdgeSemantic) {
bFoundEdgeSemantic = true;
if (SE->GetRows() != edgeSize || SE->GetCols() > 1) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmTessFactorSizeMatchDomain,
{std::to_string(SE->GetRows()),
std::to_string(SE->GetCols()), domainName,
std::to_string(edgeSize)});
}
} else if (kind == kInsideSemantic) {
bFoundInsideSemantic = true;
if (SE->GetRows() != insideSize || SE->GetCols() > 1) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmInsideTessFactorSizeMatchDomain,
{std::to_string(SE->GetRows()), std::to_string(SE->GetCols()),
domainName, std::to_string(insideSize)});
}
}
}
if (isHS) {
if (!bFoundEdgeSemantic) {
ValCtx.EmitFnError(F, ValidationRule::SmTessFactorForDomain);
}
if (!bFoundInsideSemantic && domain != DXIL::TessellatorDomain::IsoLine) {
ValCtx.EmitFnError(F, ValidationRule::SmTessFactorForDomain);
}
}
}
static void ValidatePassThruHS(ValidationContext &ValCtx,
const DxilEntryProps &entryProps, Function *F) {
// Check pass thru HS.
if (F->isDeclaration()) {
const auto &props = entryProps.props;
if (props.IsHS()) {
const auto &HS = props.ShaderProps.HS;
if (HS.inputControlPoints < HS.outputControlPoints) {
ValCtx.EmitFnError(
F, ValidationRule::SmHullPassThruControlPointCountMatch);
}
      // Check that the declared control-point output storage fits within the
      // pass-through limit (total output scalars times the output control
      // point count must not exceed the cap).
const DxilSignature &outSig = entryProps.sig.OutputSignature;
unsigned totalOutputCPScalars = 0;
for (auto &SE : outSig.GetElements()) {
totalOutputCPScalars += SE->GetRows() * SE->GetCols();
}
if (totalOutputCPScalars * HS.outputControlPoints >
DXIL::kMaxHSOutputControlPointsTotalScalars) {
ValCtx.EmitFnError(F,
ValidationRule::SmOutputControlPointsTotalScalars);
// TODO: add number at end. need format fn error?
}
} else {
ValCtx.EmitFnError(F, ValidationRule::MetaEntryFunction);
}
}
}
// Validate wave size (currently allowed only on CS and node shaders, but it
// might be supported on other shader types in the future).
static void ValidateWaveSize(ValidationContext &ValCtx,
const DxilEntryProps &entryProps, Function *F) {
const DxilFunctionProps &props = entryProps.props;
const hlsl::DxilWaveSize &waveSize = props.WaveSize;
switch (waveSize.Validate()) {
case hlsl::DxilWaveSize::ValidationResult::Success:
break;
case hlsl::DxilWaveSize::ValidationResult::InvalidMin:
ValCtx.EmitFnFormatError(F, ValidationRule::SmWaveSizeValue,
{"Min", std::to_string(waveSize.Min),
std::to_string(DXIL::kMinWaveSize),
std::to_string(DXIL::kMaxWaveSize)});
break;
case hlsl::DxilWaveSize::ValidationResult::InvalidMax:
ValCtx.EmitFnFormatError(F, ValidationRule::SmWaveSizeValue,
{"Max", std::to_string(waveSize.Max),
std::to_string(DXIL::kMinWaveSize),
std::to_string(DXIL::kMaxWaveSize)});
break;
case hlsl::DxilWaveSize::ValidationResult::InvalidPreferred:
ValCtx.EmitFnFormatError(F, ValidationRule::SmWaveSizeValue,
{"Preferred", std::to_string(waveSize.Preferred),
std::to_string(DXIL::kMinWaveSize),
std::to_string(DXIL::kMaxWaveSize)});
break;
case hlsl::DxilWaveSize::ValidationResult::MaxOrPreferredWhenUndefined:
ValCtx.EmitFnFormatError(
F, ValidationRule::SmWaveSizeAllZeroWhenUndefined,
{std::to_string(waveSize.Max), std::to_string(waveSize.Preferred)});
break;
case hlsl::DxilWaveSize::ValidationResult::MaxEqualsMin:
// This case is allowed because users may disable the ErrorDefault warning.
break;
case hlsl::DxilWaveSize::ValidationResult::PreferredWhenNoRange:
ValCtx.EmitFnFormatError(
F, ValidationRule::SmWaveSizeMaxAndPreferredZeroWhenNoRange,
{std::to_string(waveSize.Max), std::to_string(waveSize.Preferred)});
break;
case hlsl::DxilWaveSize::ValidationResult::MaxLessThanMin:
ValCtx.EmitFnFormatError(
F, ValidationRule::SmWaveSizeMaxGreaterThanMin,
{std::to_string(waveSize.Max), std::to_string(waveSize.Min)});
break;
case hlsl::DxilWaveSize::ValidationResult::PreferredOutOfRange:
ValCtx.EmitFnFormatError(F, ValidationRule::SmWaveSizePreferredInRange,
{std::to_string(waveSize.Preferred),
std::to_string(waveSize.Min),
std::to_string(waveSize.Max)});
break;
}
// Check shader model and kind.
if (waveSize.IsDefined()) {
if (!props.IsCS() && !props.IsNode()) {
ValCtx.EmitFnError(F, ValidationRule::SmWaveSizeOnComputeOrNode);
}
}
}
static void ValidateEntryProps(ValidationContext &ValCtx,
const DxilEntryProps &entryProps,
EntryStatus &Status, Function *F) {
const DxilFunctionProps &props = entryProps.props;
DXIL::ShaderKind ShaderType = props.shaderKind;
ValidateWaveSize(ValCtx, entryProps, F);
if (ShaderType == DXIL::ShaderKind::Compute || props.IsNode()) {
unsigned x = props.numThreads[0];
unsigned y = props.numThreads[1];
unsigned z = props.numThreads[2];
unsigned threadsInGroup = x * y * z;
if ((x < DXIL::kMinCSThreadGroupX) || (x > DXIL::kMaxCSThreadGroupX)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"X", std::to_string(x),
std::to_string(DXIL::kMinCSThreadGroupX),
std::to_string(DXIL::kMaxCSThreadGroupX)});
}
if ((y < DXIL::kMinCSThreadGroupY) || (y > DXIL::kMaxCSThreadGroupY)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"Y", std::to_string(y),
std::to_string(DXIL::kMinCSThreadGroupY),
std::to_string(DXIL::kMaxCSThreadGroupY)});
}
if ((z < DXIL::kMinCSThreadGroupZ) || (z > DXIL::kMaxCSThreadGroupZ)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"Z", std::to_string(z),
std::to_string(DXIL::kMinCSThreadGroupZ),
std::to_string(DXIL::kMaxCSThreadGroupZ)});
}
if (threadsInGroup > DXIL::kMaxCSThreadsPerGroup) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmMaxTheadGroup,
{std::to_string(threadsInGroup),
std::to_string(DXIL::kMaxCSThreadsPerGroup)});
}
    // The types of thread ID and thread group ID are checked by the DXIL
    // operation overload validation.
} else if (ShaderType == DXIL::ShaderKind::Mesh) {
const auto &MS = props.ShaderProps.MS;
unsigned x = props.numThreads[0];
unsigned y = props.numThreads[1];
unsigned z = props.numThreads[2];
unsigned threadsInGroup = x * y * z;
if ((x < DXIL::kMinMSASThreadGroupX) || (x > DXIL::kMaxMSASThreadGroupX)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"X", std::to_string(x),
std::to_string(DXIL::kMinMSASThreadGroupX),
std::to_string(DXIL::kMaxMSASThreadGroupX)});
}
if ((y < DXIL::kMinMSASThreadGroupY) || (y > DXIL::kMaxMSASThreadGroupY)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"Y", std::to_string(y),
std::to_string(DXIL::kMinMSASThreadGroupY),
std::to_string(DXIL::kMaxMSASThreadGroupY)});
}
if ((z < DXIL::kMinMSASThreadGroupZ) || (z > DXIL::kMaxMSASThreadGroupZ)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"Z", std::to_string(z),
std::to_string(DXIL::kMinMSASThreadGroupZ),
std::to_string(DXIL::kMaxMSASThreadGroupZ)});
}
if (threadsInGroup > DXIL::kMaxMSASThreadsPerGroup) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmMaxTheadGroup,
{std::to_string(threadsInGroup),
std::to_string(DXIL::kMaxMSASThreadsPerGroup)});
}
    // The types of thread ID and thread group ID are checked by the DXIL
    // operation overload validation.
unsigned maxVertexCount = MS.maxVertexCount;
if (maxVertexCount > DXIL::kMaxMSOutputVertexCount) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmMeshShaderMaxVertexCount,
{std::to_string(DXIL::kMaxMSOutputVertexCount),
std::to_string(maxVertexCount)});
}
unsigned maxPrimitiveCount = MS.maxPrimitiveCount;
if (maxPrimitiveCount > DXIL::kMaxMSOutputPrimitiveCount) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmMeshShaderMaxPrimitiveCount,
{std::to_string(DXIL::kMaxMSOutputPrimitiveCount),
std::to_string(maxPrimitiveCount)});
}
} else if (ShaderType == DXIL::ShaderKind::Amplification) {
unsigned x = props.numThreads[0];
unsigned y = props.numThreads[1];
unsigned z = props.numThreads[2];
unsigned threadsInGroup = x * y * z;
if ((x < DXIL::kMinMSASThreadGroupX) || (x > DXIL::kMaxMSASThreadGroupX)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"X", std::to_string(x),
std::to_string(DXIL::kMinMSASThreadGroupX),
std::to_string(DXIL::kMaxMSASThreadGroupX)});
}
if ((y < DXIL::kMinMSASThreadGroupY) || (y > DXIL::kMaxMSASThreadGroupY)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"Y", std::to_string(y),
std::to_string(DXIL::kMinMSASThreadGroupY),
std::to_string(DXIL::kMaxMSASThreadGroupY)});
}
if ((z < DXIL::kMinMSASThreadGroupZ) || (z > DXIL::kMaxMSASThreadGroupZ)) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmThreadGroupChannelRange,
{"Z", std::to_string(z),
std::to_string(DXIL::kMinMSASThreadGroupZ),
std::to_string(DXIL::kMaxMSASThreadGroupZ)});
}
if (threadsInGroup > DXIL::kMaxMSASThreadsPerGroup) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmMaxTheadGroup,
{std::to_string(threadsInGroup),
std::to_string(DXIL::kMaxMSASThreadsPerGroup)});
}
    // The types of the thread ID and thread group ID are checked by the DXIL
    // operation overload checks.
} else if (ShaderType == DXIL::ShaderKind::Domain) {
const auto &DS = props.ShaderProps.DS;
DXIL::TessellatorDomain domain = DS.domain;
if (domain >= DXIL::TessellatorDomain::LastEntry)
domain = DXIL::TessellatorDomain::Undefined;
unsigned inputControlPointCount = DS.inputControlPoints;
if (inputControlPointCount > DXIL::kMaxIAPatchControlPointCount) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmDSInputControlPointCountRange,
{std::to_string(DXIL::kMaxIAPatchControlPointCount),
std::to_string(inputControlPointCount)});
}
if (domain == DXIL::TessellatorDomain::Undefined) {
ValCtx.EmitFnError(F, ValidationRule::SmValidDomain);
}
CheckPatchConstantSemantic(ValCtx, entryProps, Status, F);
} else if (ShaderType == DXIL::ShaderKind::Hull) {
const auto &HS = props.ShaderProps.HS;
DXIL::TessellatorDomain domain = HS.domain;
if (domain >= DXIL::TessellatorDomain::LastEntry)
domain = DXIL::TessellatorDomain::Undefined;
unsigned inputControlPointCount = HS.inputControlPoints;
if (inputControlPointCount == 0) {
const DxilSignature &inputSig = entryProps.sig.InputSignature;
if (!inputSig.GetElements().empty()) {
ValCtx.EmitFnError(F,
ValidationRule::SmZeroHSInputControlPointWithInput);
}
} else if (inputControlPointCount > DXIL::kMaxIAPatchControlPointCount) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmHSInputControlPointCountRange,
{std::to_string(DXIL::kMaxIAPatchControlPointCount),
std::to_string(inputControlPointCount)});
}
unsigned outputControlPointCount = HS.outputControlPoints;
if (outputControlPointCount < DXIL::kMinIAPatchControlPointCount ||
outputControlPointCount > DXIL::kMaxIAPatchControlPointCount) {
ValCtx.EmitFnFormatError(
F, ValidationRule::SmOutputControlPointCountRange,
{std::to_string(DXIL::kMinIAPatchControlPointCount),
std::to_string(DXIL::kMaxIAPatchControlPointCount),
std::to_string(outputControlPointCount)});
}
if (domain == DXIL::TessellatorDomain::Undefined) {
ValCtx.EmitFnError(F, ValidationRule::SmValidDomain);
}
DXIL::TessellatorPartitioning partition = HS.partition;
if (partition == DXIL::TessellatorPartitioning::Undefined) {
ValCtx.EmitFnError(F, ValidationRule::MetaTessellatorPartition);
}
DXIL::TessellatorOutputPrimitive tessOutputPrimitive = HS.outputPrimitive;
if (tessOutputPrimitive == DXIL::TessellatorOutputPrimitive::Undefined ||
tessOutputPrimitive == DXIL::TessellatorOutputPrimitive::LastEntry) {
ValCtx.EmitFnError(F, ValidationRule::MetaTessellatorOutputPrimitive);
}
float maxTessFactor = HS.maxTessFactor;
if (maxTessFactor < DXIL::kHSMaxTessFactorLowerBound ||
maxTessFactor > DXIL::kHSMaxTessFactorUpperBound) {
ValCtx.EmitFnFormatError(
F, ValidationRule::MetaMaxTessFactor,
{std::to_string(DXIL::kHSMaxTessFactorLowerBound),
std::to_string(DXIL::kHSMaxTessFactorUpperBound),
std::to_string(maxTessFactor)});
}
    // Check that the domain and the tessellator output primitive match.
switch (domain) {
case DXIL::TessellatorDomain::IsoLine:
switch (tessOutputPrimitive) {
case DXIL::TessellatorOutputPrimitive::TriangleCW:
case DXIL::TessellatorOutputPrimitive::TriangleCCW:
ValCtx.EmitFnError(F, ValidationRule::SmIsoLineOutputPrimitiveMismatch);
break;
default:
break;
}
break;
case DXIL::TessellatorDomain::Tri:
switch (tessOutputPrimitive) {
case DXIL::TessellatorOutputPrimitive::Line:
ValCtx.EmitFnError(F, ValidationRule::SmTriOutputPrimitiveMismatch);
break;
default:
break;
}
break;
case DXIL::TessellatorDomain::Quad:
switch (tessOutputPrimitive) {
case DXIL::TessellatorOutputPrimitive::Line:
ValCtx.EmitFnError(F, ValidationRule::SmTriOutputPrimitiveMismatch);
break;
default:
break;
}
break;
default:
ValCtx.EmitFnError(F, ValidationRule::SmValidDomain);
break;
}
CheckPatchConstantSemantic(ValCtx, entryProps, Status, F);
} else if (ShaderType == DXIL::ShaderKind::Geometry) {
const auto &GS = props.ShaderProps.GS;
unsigned maxVertexCount = GS.maxVertexCount;
if (maxVertexCount > DXIL::kMaxGSOutputVertexCount) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmGSOutputVertexCountRange,
{std::to_string(DXIL::kMaxGSOutputVertexCount),
std::to_string(maxVertexCount)});
}
unsigned instanceCount = GS.instanceCount;
if (instanceCount > DXIL::kMaxGSInstanceCount || instanceCount < 1) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmGSInstanceCountRange,
{std::to_string(DXIL::kMaxGSInstanceCount),
std::to_string(instanceCount)});
}
DXIL::PrimitiveTopology topo = DXIL::PrimitiveTopology::Undefined;
bool bTopoMismatch = false;
for (size_t i = 0; i < _countof(GS.streamPrimitiveTopologies); ++i) {
if (GS.streamPrimitiveTopologies[i] !=
DXIL::PrimitiveTopology::Undefined) {
if (topo == DXIL::PrimitiveTopology::Undefined)
topo = GS.streamPrimitiveTopologies[i];
else if (topo != GS.streamPrimitiveTopologies[i]) {
bTopoMismatch = true;
break;
}
}
}
if (bTopoMismatch)
topo = DXIL::PrimitiveTopology::Undefined;
switch (topo) {
case DXIL::PrimitiveTopology::PointList:
case DXIL::PrimitiveTopology::LineStrip:
case DXIL::PrimitiveTopology::TriangleStrip:
break;
default: {
ValCtx.EmitFnError(F, ValidationRule::SmGSValidOutputPrimitiveTopology);
} break;
}
DXIL::InputPrimitive inputPrimitive = GS.inputPrimitive;
unsigned VertexCount = GetNumVertices(inputPrimitive);
if (VertexCount == 0 && inputPrimitive != DXIL::InputPrimitive::Undefined) {
ValCtx.EmitFnError(F, ValidationRule::SmGSValidInputPrimitive);
}
}
}
static void ValidateShaderState(ValidationContext &ValCtx) {
DxilModule &DM = ValCtx.DxilMod;
if (ValCtx.isLibProfile) {
for (Function &F : DM.GetModule()->functions()) {
if (DM.HasDxilEntryProps(&F)) {
DxilEntryProps &entryProps = DM.GetDxilEntryProps(&F);
EntryStatus &Status = ValCtx.GetEntryStatus(&F);
ValidateEntryProps(ValCtx, entryProps, Status, &F);
ValidatePassThruHS(ValCtx, entryProps, &F);
}
}
} else {
Function *Entry = DM.GetEntryFunction();
if (!DM.HasDxilEntryProps(Entry)) {
// must have props.
ValCtx.EmitFnError(Entry, ValidationRule::MetaNoEntryPropsForEntry);
return;
}
EntryStatus &Status = ValCtx.GetEntryStatus(Entry);
DxilEntryProps &entryProps = DM.GetDxilEntryProps(Entry);
ValidateEntryProps(ValCtx, entryProps, Status, Entry);
ValidatePassThruHS(ValCtx, entryProps, Entry);
}
}
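// Depth-first walk of the call graph from |node|, tracking the current call
// stack in |callStack|, recording in |depthMap| the deepest stack depth at
// which each node is reached, and collecting every visited function in
// |funcSet|. Returns the first node that appears twice on the call stack
// (i.e. part of a recursive call chain), or nullptr if no recursion is found.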
static CallGraphNode *
CalculateCallDepth(CallGraphNode *node,
std::unordered_map<CallGraphNode *, unsigned> &depthMap,
std::unordered_set<CallGraphNode *> &callStack,
std::unordered_set<Function *> &funcSet) {
unsigned depth = callStack.size();
funcSet.insert(node->getFunction());
for (auto it = node->begin(), ei = node->end(); it != ei; it++) {
CallGraphNode *toNode = it->second;
if (callStack.insert(toNode).second == false) {
// Recursive.
return toNode;
}
if (depthMap[toNode] < depth)
depthMap[toNode] = depth;
if (CallGraphNode *N =
CalculateCallDepth(toNode, depthMap, callStack, funcSet)) {
// Recursive
return N;
}
callStack.erase(toNode);
}
return nullptr;
}
static void ValidateCallGraph(ValidationContext &ValCtx) {
// Build CallGraph.
CallGraph &CG = ValCtx.GetCallGraph();
std::unordered_map<CallGraphNode *, unsigned> depthMap;
std::unordered_set<CallGraphNode *> callStack;
CallGraphNode *entryNode = CG[ValCtx.DxilMod.GetEntryFunction()];
depthMap[entryNode] = 0;
if (CallGraphNode *N = CalculateCallDepth(entryNode, depthMap, callStack,
ValCtx.entryFuncCallSet))
ValCtx.EmitFnError(N->getFunction(), ValidationRule::FlowNoRecursion);
if (ValCtx.DxilMod.GetShaderModel()->IsHS()) {
CallGraphNode *patchConstantNode =
CG[ValCtx.DxilMod.GetPatchConstantFunction()];
depthMap[patchConstantNode] = 0;
callStack.clear();
if (CallGraphNode *N =
CalculateCallDepth(patchConstantNode, depthMap, callStack,
ValCtx.patchConstFuncCallSet))
ValCtx.EmitFnError(N->getFunction(), ValidationRule::FlowNoRecursion);
}
}
static void ValidateFlowControl(ValidationContext &ValCtx) {
bool reducible =
IsReducible(*ValCtx.DxilMod.GetModule(), IrreducibilityAction::Ignore);
if (!reducible) {
ValCtx.EmitError(ValidationRule::FlowReducible);
return;
}
ValidateCallGraph(ValCtx);
for (llvm::Function &F : ValCtx.DxilMod.GetModule()->functions()) {
if (F.isDeclaration())
continue;
DominatorTreeAnalysis DTA;
DominatorTree DT = DTA.run(F);
LoopInfo LI;
LI.Analyze(DT);
for (auto loopIt = LI.begin(); loopIt != LI.end(); loopIt++) {
Loop *loop = *loopIt;
SmallVector<BasicBlock *, 4> exitBlocks;
loop->getExitBlocks(exitBlocks);
if (exitBlocks.empty())
ValCtx.EmitFnError(&F, ValidationRule::FlowDeadLoop);
}
    // Validate that there is no use of a record handle after it has been
    // completed by OutputComplete in this function.
hlsl::OP *hlslOP = ValCtx.DxilMod.GetOP();
for (auto &it : hlslOP->GetOpFuncList(DXIL::OpCode::OutputComplete)) {
Function *pF = it.second;
if (!pF)
continue;
// first, collect all the output complete calls that are not dominated
// by another OutputComplete call for the same handle value
llvm::SmallMapVector<Value *, llvm::SmallPtrSet<CallInst *, 4>, 4>
handleToCI;
for (User *U : pF->users()) {
        // All OutputComplete calls are call instructions, so a null check is
        // not needed here.
CallInst *CI = cast<CallInst>(U);
// verify that the function that contains this instruction is the same
// function that the DominatorTree was built on.
if (&F != CI->getParent()->getParent())
continue;
DxilInst_OutputComplete OutputComplete(CI);
Value *completedRecord = OutputComplete.get_output();
auto vIt = handleToCI.find(completedRecord);
if (vIt == handleToCI.end()) {
llvm::SmallPtrSet<CallInst *, 4> s;
s.insert(CI);
handleToCI.insert(std::make_pair(completedRecord, s));
} else {
// if the handle is already in the map, make sure the map's set of
// output complete calls that dominate the handle and do not dominate
// each other gets updated if necessary
bool CI_is_dominated = false;
for (auto ocIt = vIt->second.begin(); ocIt != vIt->second.end();) {
// if our new OC CI dominates an OC instruction in the set,
// then replace the instruction in the set with the new OC CI.
if (DT.dominates(CI, *ocIt)) {
auto cur_it = ocIt++;
vIt->second.erase(*cur_it);
continue;
}
// Remember if our new CI gets dominated by any CI in the set.
if (DT.dominates(*ocIt, CI)) {
CI_is_dominated = true;
break;
}
ocIt++;
}
// if no CI in the set dominates our new CI,
// the new CI should be added to the set
if (!CI_is_dominated)
vIt->second.insert(CI);
}
}
for (auto handle_iter = handleToCI.begin(), e = handleToCI.end();
handle_iter != e; handle_iter++) {
for (auto user_itr = handle_iter->first->user_begin();
user_itr != handle_iter->first->user_end(); user_itr++) {
User *pU = *user_itr;
        Instruction *useInstr = dyn_cast<Instruction>(pU);
        if (useInstr) {
if (CallInst *CI = dyn_cast<CallInst>(useInstr)) {
// if the user is an output complete call that is in the set of
// OutputComplete calls not dominated by another OutputComplete
// call for the same handle value, no diagnostics need to be
// emitted.
if (handle_iter->second.count(CI) == 1)
continue;
}
// make sure any output complete call in the set
// that dominates this use gets its diagnostic emitted.
for (auto ocIt = handle_iter->second.begin();
ocIt != handle_iter->second.end(); ocIt++) {
Instruction *ocInstr = cast<Instruction>(*ocIt);
if (DT.dominates(ocInstr, useInstr)) {
ValCtx.EmitInstrError(
useInstr,
ValidationRule::InstrNodeRecordHandleUseAfterComplete);
ValCtx.EmitInstrNote(
*ocIt, "record handle invalidated by OutputComplete");
break;
}
}
}
}
}
}
}
  // fxc has ERR_CONTINUE_INSIDE_SWITCH to disallow continue inside switch.
  // We do not enforce that for now.
}
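// Verify that entry outputs are fully initialized: every declared output
// column must be written unless its semantic is arbitrary (or SV_Target),
// and if SV_Position is written at all, all four components must be written.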
static void ValidateUninitializedOutput(ValidationContext &ValCtx,
Function *F) {
DxilModule &DM = ValCtx.DxilMod;
DxilEntryProps &entryProps = DM.GetDxilEntryProps(F);
EntryStatus &Status = ValCtx.GetEntryStatus(F);
const DxilFunctionProps &props = entryProps.props;
  // For HS we only need to check the tess factors, which are in the patch
  // constant signature.
if (props.IsHS()) {
std::vector<unsigned> &patchConstOrPrimCols = Status.patchConstOrPrimCols;
const DxilSignature &patchConstSig =
entryProps.sig.PatchConstOrPrimSignature;
for (auto &E : patchConstSig.GetElements()) {
unsigned mask = patchConstOrPrimCols[E->GetID()];
unsigned requireMask = (1 << E->GetCols()) - 1;
      // TODO: check other cases where uninitialized output is allowed.
if (mask != requireMask && !E->GetSemantic()->IsArbitrary()) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmUndefinedOutput,
{E->GetName()});
}
}
return;
}
const DxilSignature &outSig = entryProps.sig.OutputSignature;
std::vector<unsigned> &outputCols = Status.outputCols;
for (auto &E : outSig.GetElements()) {
unsigned mask = outputCols[E->GetID()];
unsigned requireMask = (1 << E->GetCols()) - 1;
    // TODO: check other cases where uninitialized output is allowed.
if (mask != requireMask && !E->GetSemantic()->IsArbitrary() &&
E->GetSemantic()->GetKind() != Semantic::Kind::Target) {
ValCtx.EmitFnFormatError(F, ValidationRule::SmUndefinedOutput,
{E->GetName()});
}
}
if (!props.IsGS()) {
unsigned posMask = Status.OutputPositionMask[0];
if (posMask != 0xf && Status.hasOutputPosition[0]) {
ValCtx.EmitFnError(F, ValidationRule::SmCompletePosition);
}
} else {
const auto &GS = props.ShaderProps.GS;
unsigned streamMask = 0;
for (size_t i = 0; i < _countof(GS.streamPrimitiveTopologies); ++i) {
if (GS.streamPrimitiveTopologies[i] !=
DXIL::PrimitiveTopology::Undefined) {
streamMask |= 1 << i;
}
}
for (unsigned i = 0; i < DXIL::kNumOutputStreams; i++) {
if (streamMask & (1 << i)) {
unsigned posMask = Status.OutputPositionMask[i];
if (posMask != 0xf && Status.hasOutputPosition[i]) {
ValCtx.EmitFnError(F, ValidationRule::SmCompletePosition);
}
}
}
}
}
static void ValidateUninitializedOutput(ValidationContext &ValCtx) {
DxilModule &DM = ValCtx.DxilMod;
if (ValCtx.isLibProfile) {
for (Function &F : DM.GetModule()->functions()) {
if (DM.HasDxilEntryProps(&F)) {
ValidateUninitializedOutput(ValCtx, &F);
}
}
} else {
Function *Entry = DM.GetEntryFunction();
if (!DM.HasDxilEntryProps(Entry)) {
// must have props.
ValCtx.EmitFnError(Entry, ValidationRule::MetaNoEntryPropsForEntry);
return;
}
ValidateUninitializedOutput(ValCtx, Entry);
}
}
uint32_t ValidateDxilModule(llvm::Module *pModule, llvm::Module *pDebugModule) {
DxilModule *pDxilModule = DxilModule::TryGetDxilModule(pModule);
if (!pDxilModule) {
return DXC_E_IR_VERIFICATION_FAILED;
}
if (pDxilModule->HasMetadataErrors()) {
dxilutil::EmitErrorOnContext(pModule->getContext(),
"Metadata error encountered in non-critical "
"metadata (such as Type Annotations).");
return DXC_E_IR_VERIFICATION_FAILED;
}
ValidationContext ValCtx(*pModule, pDebugModule, *pDxilModule);
ValidateBitcode(ValCtx);
ValidateMetadata(ValCtx);
ValidateShaderState(ValCtx);
ValidateGlobalVariables(ValCtx);
ValidateResources(ValCtx);
  // Validate control flow and collect function call info.
  // If there is a recursive call, call info collection will not finish.
ValidateFlowControl(ValCtx);
// Validate functions.
for (Function &F : pModule->functions()) {
ValidateFunction(F, ValCtx);
}
ValidateShaderFlags(ValCtx);
ValidateEntryCompatibility(ValCtx);
ValidateEntrySignatures(ValCtx);
ValidateUninitializedOutput(ValCtx);
// Ensure error messages are flushed out on error.
if (ValCtx.Failed) {
return DXC_E_IR_VERIFICATION_FAILED;
}
return S_OK;
}
} // namespace hlsl
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/VectorUtils.cpp | //===----------- VectorUtils.cpp - Vectorizer utility functions -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
/// \brief Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
switch (ID) {
case Intrinsic::sqrt:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::log:
case Intrinsic::log10:
case Intrinsic::log2:
case Intrinsic::fabs:
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::copysign:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::trunc:
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round:
case Intrinsic::bswap:
case Intrinsic::ctpop:
case Intrinsic::pow:
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::powi:
return true;
default:
return false;
}
}
/// \brief Identifies if the intrinsic has a scalar operand. It checks for the
/// ctlz, cttz and powi special intrinsics, whose second argument is a scalar.
bool llvm::hasVectorInstrinsicScalarOpd(Intrinsic::ID ID,
unsigned ScalarOpdIdx) {
switch (ID) {
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::powi:
return (ScalarOpdIdx == 1);
default:
return false;
}
}
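// For example, in a call such as
// %c = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
// operand 1 (the is-zero-undef flag) remains an i1 scalar even in the vector
// form @llvm.ctlz.v4i32, so the vectorizer must not widen it.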
/// \brief Check whether the call has a unary float signature.
/// It checks the following:
/// a) the call has a single argument
/// b) the argument is of floating point type
/// c) the call instruction type and the argument type are the same
/// d) the call only reads memory.
/// If all these conditions are met, it returns ValidIntrinsicID,
/// else not_intrinsic.
llvm::Intrinsic::ID
llvm::checkUnaryFloatSignature(const CallInst &I,
Intrinsic::ID ValidIntrinsicID) {
if (I.getNumArgOperands() != 1 ||
!I.getArgOperand(0)->getType()->isFloatingPointTy() ||
I.getType() != I.getArgOperand(0)->getType() || !I.onlyReadsMemory())
return Intrinsic::not_intrinsic;
return ValidIntrinsicID;
}
/// \brief Check whether the call has a binary float signature.
/// It checks the following:
/// a) the call has 2 arguments
/// b) both arguments are of floating point type
/// c) the call instruction type and the argument types are the same
/// d) the call only reads memory.
/// If all these conditions are met, it returns ValidIntrinsicID,
/// else not_intrinsic.
llvm::Intrinsic::ID
llvm::checkBinaryFloatSignature(const CallInst &I,
Intrinsic::ID ValidIntrinsicID) {
if (I.getNumArgOperands() != 2 ||
!I.getArgOperand(0)->getType()->isFloatingPointTy() ||
!I.getArgOperand(1)->getType()->isFloatingPointTy() ||
I.getType() != I.getArgOperand(0)->getType() ||
I.getType() != I.getArgOperand(1)->getType() || !I.onlyReadsMemory())
return Intrinsic::not_intrinsic;
return ValidIntrinsicID;
}
/// \brief Returns the intrinsic ID for a call.
/// For the input call instruction it finds the matching intrinsic and returns
/// its ID; if none is found, it returns not_intrinsic.
llvm::Intrinsic::ID llvm::getIntrinsicIDForCall(CallInst *CI,
const TargetLibraryInfo *TLI) {
// If we have an intrinsic call, check if it is trivially vectorizable.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
Intrinsic::ID ID = II->getIntrinsicID();
if (isTriviallyVectorizable(ID) || ID == Intrinsic::lifetime_start ||
ID == Intrinsic::lifetime_end || ID == Intrinsic::assume)
return ID;
return Intrinsic::not_intrinsic;
}
if (!TLI)
return Intrinsic::not_intrinsic;
LibFunc::Func Func;
Function *F = CI->getCalledFunction();
  // We're going to make assumptions on the semantics of the functions, so
  // check that the target knows the function is available in this environment
  // and that it does not have local linkage.
if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(F->getName(), Func))
return Intrinsic::not_intrinsic;
// Otherwise check if we have a call to a function that can be turned into a
// vector intrinsic.
switch (Func) {
default:
break;
case LibFunc::sin:
case LibFunc::sinf:
case LibFunc::sinl:
return checkUnaryFloatSignature(*CI, Intrinsic::sin);
case LibFunc::cos:
case LibFunc::cosf:
case LibFunc::cosl:
return checkUnaryFloatSignature(*CI, Intrinsic::cos);
case LibFunc::exp:
case LibFunc::expf:
case LibFunc::expl:
return checkUnaryFloatSignature(*CI, Intrinsic::exp);
case LibFunc::exp2:
case LibFunc::exp2f:
case LibFunc::exp2l:
return checkUnaryFloatSignature(*CI, Intrinsic::exp2);
case LibFunc::log:
case LibFunc::logf:
case LibFunc::logl:
return checkUnaryFloatSignature(*CI, Intrinsic::log);
case LibFunc::log10:
case LibFunc::log10f:
case LibFunc::log10l:
return checkUnaryFloatSignature(*CI, Intrinsic::log10);
case LibFunc::log2:
case LibFunc::log2f:
case LibFunc::log2l:
return checkUnaryFloatSignature(*CI, Intrinsic::log2);
case LibFunc::fabs:
case LibFunc::fabsf:
case LibFunc::fabsl:
return checkUnaryFloatSignature(*CI, Intrinsic::fabs);
case LibFunc::fmin:
case LibFunc::fminf:
case LibFunc::fminl:
return checkBinaryFloatSignature(*CI, Intrinsic::minnum);
case LibFunc::fmax:
case LibFunc::fmaxf:
case LibFunc::fmaxl:
return checkBinaryFloatSignature(*CI, Intrinsic::maxnum);
case LibFunc::copysign:
case LibFunc::copysignf:
case LibFunc::copysignl:
return checkBinaryFloatSignature(*CI, Intrinsic::copysign);
case LibFunc::floor:
case LibFunc::floorf:
case LibFunc::floorl:
return checkUnaryFloatSignature(*CI, Intrinsic::floor);
case LibFunc::ceil:
case LibFunc::ceilf:
case LibFunc::ceill:
return checkUnaryFloatSignature(*CI, Intrinsic::ceil);
case LibFunc::trunc:
case LibFunc::truncf:
case LibFunc::truncl:
return checkUnaryFloatSignature(*CI, Intrinsic::trunc);
case LibFunc::rint:
case LibFunc::rintf:
case LibFunc::rintl:
return checkUnaryFloatSignature(*CI, Intrinsic::rint);
case LibFunc::nearbyint:
case LibFunc::nearbyintf:
case LibFunc::nearbyintl:
return checkUnaryFloatSignature(*CI, Intrinsic::nearbyint);
case LibFunc::round:
case LibFunc::roundf:
case LibFunc::roundl:
return checkUnaryFloatSignature(*CI, Intrinsic::round);
case LibFunc::pow:
case LibFunc::powf:
case LibFunc::powl:
return checkBinaryFloatSignature(*CI, Intrinsic::pow);
}
return Intrinsic::not_intrinsic;
}
/// \brief Find the operand of the GEP that should be checked for consecutive
/// stores. This ignores trailing indices that have no effect on the final
/// pointer.
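/// For example, for
///   %p = getelementptr [4 x [4 x i32]], [4 x [4 x i32]]* %A, i64 0, i64 %i, i64 0
/// the trailing zero index can be peeled off, since the type indexed at that
/// level has the same allocation size as the GEP's result element type, so
/// operand 2 (%i) is the operand to check.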
unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
const DataLayout &DL = Gep->getModule()->getDataLayout();
unsigned LastOperand = Gep->getNumOperands() - 1;
unsigned GEPAllocSize = DL.getTypeAllocSize(
cast<PointerType>(Gep->getType()->getScalarType())->getElementType());
// Walk backwards and try to peel off zeros.
while (LastOperand > 1 &&
match(Gep->getOperand(LastOperand), llvm::PatternMatch::m_Zero())) {
// Find the type we're currently indexing into.
gep_type_iterator GEPTI = gep_type_begin(Gep);
std::advance(GEPTI, LastOperand - 1);
// If it's a type with the same allocation size as the result of the GEP we
// can peel off the zero index.
if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
break;
--LastOperand;
}
return LastOperand;
}
/// \brief If the argument is a GEP, then returns the operand identified by
/// getGEPInductionOperand. However, if there is some other non-loop-invariant
/// operand, it returns that instead.
llvm::Value *llvm::stripGetElementPtr(llvm::Value *Ptr, ScalarEvolution *SE,
Loop *Lp) {
GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
if (!GEP)
return Ptr;
unsigned InductionOperand = getGEPInductionOperand(GEP);
// Check that all of the gep indices are uniform except for our induction
// operand.
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
if (i != InductionOperand &&
!SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
return Ptr;
return GEP->getOperand(InductionOperand);
}
/// \brief If a value has only one user that is a CastInst, return it.
llvm::Value *llvm::getUniqueCastUse(llvm::Value *Ptr, Loop *Lp, Type *Ty) {
llvm::Value *UniqueCast = nullptr;
for (User *U : Ptr->users()) {
CastInst *CI = dyn_cast<CastInst>(U);
if (CI && CI->getType() == Ty) {
if (!UniqueCast)
UniqueCast = CI;
else
return nullptr;
}
}
return UniqueCast;
}
/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
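/// For example, given a loop that accesses a[i * stride] with a loop-invariant
/// `stride`, this returns the `stride` value so a client (e.g. the loop
/// vectorizer) can version the loop for the common stride == 1 case.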
llvm::Value *llvm::getStrideFromPointer(llvm::Value *Ptr, ScalarEvolution *SE,
Loop *Lp) {
const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
if (!PtrTy || PtrTy->isAggregateType())
return nullptr;
  // Try to remove a gep instruction to make the pointer (actually the index at
  // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
  // the pointer; otherwise, we are analyzing the index.
llvm::Value *OrigPtr = Ptr;
// The size of the pointer access.
int64_t PtrAccessSize = 1;
Ptr = stripGetElementPtr(Ptr, SE, Lp);
const SCEV *V = SE->getSCEV(Ptr);
if (Ptr != OrigPtr)
// Strip off casts.
while (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V))
V = C->getOperand();
const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
if (!S)
return nullptr;
V = S->getStepRecurrence(*SE);
if (!V)
return nullptr;
// Strip off the size of access multiplication if we are still analyzing the
// pointer.
if (OrigPtr == Ptr) {
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
if (M->getOperand(0)->getSCEVType() != scConstant)
return nullptr;
const APInt &APStepVal =
cast<SCEVConstant>(M->getOperand(0))->getValue()->getValue();
// Huge step value - give up.
if (APStepVal.getBitWidth() > 64)
return nullptr;
int64_t StepVal = APStepVal.getSExtValue();
if (PtrAccessSize != StepVal)
return nullptr;
V = M->getOperand(1);
}
}
// Strip off casts.
Type *StripedOffRecurrenceCast = nullptr;
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
StripedOffRecurrenceCast = C->getType();
V = C->getOperand();
}
// Look for the loop invariant symbolic value.
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
if (!U)
return nullptr;
llvm::Value *Stride = U->getValue();
if (!Lp->isLoopInvariant(Stride))
return nullptr;
// If we have stripped off the recurrence cast we have to make sure that we
// return the value that is used in this loop so that we can replace it later.
if (StripedOffRecurrenceCast)
Stride = getUniqueCastUse(Stride, Lp, StripedOffRecurrenceCast);
return Stride;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/MemoryDependenceAnalysis.cpp | //===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
#define DEBUG_TYPE "memdep"
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
STATISTIC(NumCacheNonLocalPtr,
"Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
"Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
"Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
"Number of block queries that were completely cached");
// Limit for the number of instructions to scan in a block.
static const unsigned int BlockScanLimit = 500;
// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;
char MemoryDependenceAnalysis::ID = 0;
// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
"Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
"Memory Dependence Analysis", false, true)
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(ID) {
initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}
/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
LocalDeps.clear();
NonLocalDeps.clear();
NonLocalPointerDeps.clear();
ReverseLocalDeps.clear();
ReverseNonLocalDeps.clear();
ReverseNonLocalPtrDeps.clear();
PredCache.clear();
}
/// getAnalysisUsage - Does not modify anything. It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<AssumptionCacheTracker>();
AU.addRequiredTransitive<AliasAnalysis>();
}
bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
return false;
}
/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap. If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
SmallPtrSet<KeyTy, 4> > &ReverseMap,
Instruction *Inst, KeyTy Val) {
typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
InstIt = ReverseMap.find(Inst);
assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
bool Found = InstIt->second.erase(Val);
assert(Found && "Invalid reverse map!"); (void)Found;
if (InstIt->second.empty())
ReverseMap.erase(InstIt);
}
/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static AliasAnalysis::ModRefResult
GetLocation(const Instruction *Inst, MemoryLocation &Loc, AliasAnalysis *AA) {
if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (LI->isUnordered()) {
Loc = MemoryLocation::get(LI);
return AliasAnalysis::Ref;
}
if (LI->getOrdering() == Monotonic) {
Loc = MemoryLocation::get(LI);
return AliasAnalysis::ModRef;
}
Loc = MemoryLocation();
return AliasAnalysis::ModRef;
}
if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (SI->isUnordered()) {
Loc = MemoryLocation::get(SI);
return AliasAnalysis::Mod;
}
if (SI->getOrdering() == Monotonic) {
Loc = MemoryLocation::get(SI);
return AliasAnalysis::ModRef;
}
Loc = MemoryLocation();
return AliasAnalysis::ModRef;
}
if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Loc = MemoryLocation::get(V);
return AliasAnalysis::ModRef;
}
if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure
Loc = MemoryLocation(CI->getArgOperand(0));
return AliasAnalysis::Mod;
}
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
AAMDNodes AAInfo;
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
II->getAAMetadata(AAInfo);
Loc = MemoryLocation(
II->getArgOperand(1),
cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return AliasAnalysis::Mod;
case Intrinsic::invariant_end:
II->getAAMetadata(AAInfo);
Loc = MemoryLocation(
II->getArgOperand(2),
cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AAInfo);
// These intrinsics don't really modify the memory, but returning Mod
// will allow them to be handled conservatively.
return AliasAnalysis::Mod;
default:
break;
}
}
// Otherwise, just do the coarse-grained thing that always works.
if (Inst->mayWriteToMemory())
return AliasAnalysis::ModRef;
if (Inst->mayReadFromMemory())
return AliasAnalysis::Ref;
return AliasAnalysis::NoModRef;
}
/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
BasicBlock::iterator ScanIt, BasicBlock *BB) {
unsigned Limit = BlockScanLimit;
// Walk backwards through the block, looking for dependencies
while (ScanIt != BB->begin()) {
// HLSL Change - Begin
// Skip debug info
if (isa<DbgInfoIntrinsic>(*std::prev(ScanIt))) {
ScanIt--; continue;
}
// HLSL Change - End
// Limit the amount of scanning we do so we don't end up with quadratic
// running time on extreme testcases.
--Limit;
if (!Limit)
return MemDepResult::getUnknown();
Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
MemoryLocation Loc;
AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
if (Loc.Ptr) {
// A simple instruction.
if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
return MemDepResult::getClobber(Inst);
continue;
}
if (auto InstCS = CallSite(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
// If these two calls do not interfere, look past it.
switch (AA->getModRefInfo(CS, InstCS)) {
case AliasAnalysis::NoModRef:
// If the two calls are the same, return InstCS as a Def, so that
// CS can be found redundant and eliminated.
if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
CS.getInstruction()->isIdenticalToWhenDefined(Inst))
return MemDepResult::getDef(Inst);
// Otherwise if the two calls don't interact (e.g. InstCS is readnone)
// keep scanning.
continue;
default:
return MemDepResult::getClobber(Inst);
}
}
// If we could not obtain a pointer for the instruction and the instruction
// touches memory then assume that this is a dependency.
if (MR != AliasAnalysis::NoModRef)
return MemDepResult::getClobber(Inst);
}
  // No dependence found. If this is the entry block of the function, the
  // result is non-func-local; otherwise it is non-local.
if (BB != &BB->getParent()->getEntryBlock())
return MemDepResult::getNonLocal();
return MemDepResult::getNonFuncLocal();
}
/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase and MemLocOffs are lazily computed here the first time the
/// base/offset of MemLoc is needed.
static bool isLoadLoadClobberIfExtendedToFullWidth(const MemoryLocation &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI) {
const DataLayout &DL = LI->getModule()->getDataLayout();
// If we haven't already computed the base/offset of MemLoc, do so now.
if (!MemLocBase)
MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
MemLocBase, MemLocOffs, MemLoc.Size, LI);
return Size != 0;
}
/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
/// looks at a memory location for a load (specified by MemLocBase, Offs,
/// and Size) and compares it against a load. If the specified load could
/// be safely widened to a larger integer load that is 1) still efficient,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use. If not, this returns zero.
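/// For example, querying a 1-byte location at P+3 against an i8 load from P
/// that is known to be 4-byte aligned: widening that load to an i32 covers
/// bytes P..P+3, so this returns 4.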
unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
const LoadInst *LI) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;
// Load widening is hostile to ThreadSanitizer: it may cause false positives
// or make the reports more cryptic (access sizes are wrong).
if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return 0;
const DataLayout &DL = LI->getModule()->getDataLayout();
// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);
// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
if (LIBase != MemLocBase) return 0;
// Okay, the two values are based on the same pointer, but returned as
// no-alias. This happens when we have things like two byte loads at "P+1"
// and "P+3". Check to see if increasing the size of the "LI" load up to its
// alignment (or the largest native integer type) will allow us to load all
// the bits required by MemLoc.
// If MemLoc is before LI, then no widening of LI will help us out.
if (MemLocOffs < LIOffs) return 0;
// Get the alignment of the load in bytes. We assume that it is safe to load
// any legal integer up to this size without a problem. For example, if we're
// looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
// widen it up to an i32 load. If it is known 2-byte aligned, we can widen it
// to i16.
unsigned LoadAlign = LI->getAlignment();
int64_t MemLocEnd = MemLocOffs+MemLocSize;
// If no amount of rounding up will let MemLoc fit into LI, then bail out.
if (LIOffs+LoadAlign < MemLocEnd) return 0;
// This is the size of the load to try. Start with the next larger power of
// two.
unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
NewLoadByteSize = NextPowerOf2(NewLoadByteSize);
while (1) {
// If this load size is bigger than our known alignment or would not fit
// into a native integer register, then we fail.
if (NewLoadByteSize > LoadAlign ||
!DL.fitsInLegalInteger(NewLoadByteSize*8))
return 0;
if (LIOffs + NewLoadByteSize > MemLocEnd &&
LI->getParent()->getParent()->hasFnAttribute(
Attribute::SanitizeAddress))
// We will be reading past the location accessed by the original program.
// While this is safe in a regular build, Address Safety analysis tools
// may start reporting false warnings. So, don't do widening.
return 0;
// If a load of this width would include all of MemLoc, then we succeed.
if (LIOffs+NewLoadByteSize >= MemLocEnd)
return NewLoadByteSize;
NewLoadByteSize <<= 1;
}
}
static bool isVolatile(Instruction *Inst) {
if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
return LI->isVolatile();
else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
return SI->isVolatile();
else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
return AI->isVolatile();
return false;
}
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends. If isLoad is true, this routine ignores may-aliases with
/// read-only operations. If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations. If possible, pass the query
/// instruction as well; this function may take advantage of the metadata
/// annotated to the query instruction to refine the result.
MemDepResult MemoryDependenceAnalysis::getPointerDependencyFrom(
const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
BasicBlock *BB, Instruction *QueryInst, unsigned Limit) {
const Value *MemLocBase = nullptr;
int64_t MemLocOffset = 0;
bool isInvariantLoad = false;
unsigned DefaultLimit = BlockScanLimit;
if (Limit == 0)
Limit = DefaultLimit;
// We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
// QueryInst is not a simple (non-atomic) memory access, we automatically
// return getClobber.
// If it is simple, we know based on the results of
// "Compiler testing via a theory of sound optimisations in the C11/C++11
// memory model" in PLDI 2013, that a non-atomic location can only be
// clobbered between a pair of a release and an acquire action, with no
// access to the location in between.
// Here is an example for giving the general intuition behind this rule.
// In the following code:
// store x 0;
// release action; [1]
// acquire action; [4]
// %val = load x;
// It is unsafe to replace %val by 0 because another thread may be running:
// acquire action; [2]
// store x 42;
// release action; [3]
// with synchronization from 1 to 2 and from 3 to 4, resulting in %val
// being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
// by every program that can detect any optimisation of that kind: either
// it is racy (undefined) or there is a release followed by an acquire
// between the pair of accesses under consideration.
// If the load is invariant, we "know" that it doesn't alias *any* write. We
// do want to respect mustalias results since defs are useful for value
// forwarding, but any mayalias write can be assumed to be noalias.
// Arguably, this logic should be pushed inside AliasAnalysis itself.
if (isLoad && QueryInst) {
LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
isInvariantLoad = true;
}
const DataLayout &DL = BB->getModule()->getDataLayout();
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
Instruction *Inst = --ScanIt;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
// Debug intrinsics don't (and can't) cause dependencies.
if (isa<DbgInfoIntrinsic>(II)) continue;
// Limit the amount of scanning we do so we don't end up with quadratic
// running time on extreme testcases.
--Limit;
if (!Limit)
return MemDepResult::getUnknown();
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
// If we reach a lifetime begin or end marker, then the query ends here
// because the value is undefined.
if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
// FIXME: This only considers queries directly on the invariant-tagged
// pointer, not on query pointers that are indexed off of them. It'd
// be nice to handle that at some point (the right approach is to use
// GetPointerBaseWithConstantOffset).
if (AA->isMustAlias(MemoryLocation(II->getArgOperand(1)), MemLoc))
return MemDepResult::getDef(II);
continue;
}
}
// Values depend on loads if the pointers are must aliased. This means that
// a load depends on another must aliased load from the same value.
// One exception is atomic loads: a value can depend on an atomic load that it
// does not alias with when this atomic load indicates that another thread may
// be accessing the location.
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses, for example, can be
      // safely reordered with volatile accesses.
if (LI->isVolatile()) {
if (!QueryInst)
// Original QueryInst *may* be volatile
return MemDepResult::getClobber(LI);
if (isVolatile(QueryInst))
// Ordering required if QueryInst is itself volatile
return MemDepResult::getClobber(LI);
// Otherwise, volatile doesn't imply any special ordering
}
// Atomic loads have complications involved.
// A Monotonic (or higher) load is OK if the query inst is itself not atomic.
// FIXME: This is overly conservative.
if (LI->isAtomic() && LI->getOrdering() > Unordered) {
if (!QueryInst)
return MemDepResult::getClobber(LI);
if (LI->getOrdering() != Monotonic)
return MemDepResult::getClobber(LI);
if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
if (!QueryLI->isSimple())
return MemDepResult::getClobber(LI);
} else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
if (!QuerySI->isSimple())
return MemDepResult::getClobber(LI);
} else if (QueryInst->mayReadOrWriteMemory()) {
return MemDepResult::getClobber(LI);
}
}
MemoryLocation LoadLoc = MemoryLocation::get(LI);
// If we found a pointer, check if it could be the same as our pointer.
AliasResult R = AA->alias(LoadLoc, MemLoc);
if (isLoad) {
if (R == NoAlias) {
// If this is an over-aligned integer load (for example,
// "load i8* %P, align 4") see if it would obviously overlap with the
// queried location if widened to a larger load (e.g. if the queried
// location is 1 byte at P+1). If so, return it as a load/load
// clobber result, allowing the client to decide to widen the load if
// it wants to.
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
MemLocOffset, LI))
return MemDepResult::getClobber(Inst);
}
continue;
}
// Must aliased loads are defs of each other.
if (R == MustAlias)
return MemDepResult::getDef(Inst);
#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
// in terms of clobbering loads, but since it does this by looking
// at the clobbering load directly, it doesn't know about any
// phi translation that may have happened along the way.
// If we have a partial alias, then return this as a clobber for the
// client to handle.
if (R == PartialAlias)
return MemDepResult::getClobber(Inst);
#endif
// Random may-alias loads don't depend on each other without a
// dependence.
continue;
}
// Stores don't depend on other no-aliased accesses.
if (R == NoAlias)
continue;
// Stores don't alias loads from read-only memory.
if (AA->pointsToConstantMemory(LoadLoc))
continue;
// Stores depend on may/must aliased loads.
return MemDepResult::getDef(Inst);
}
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// Atomic stores have complications involved.
// A Monotonic store is OK if the query inst is itself not atomic.
// FIXME: This is overly conservative.
if (!SI->isUnordered()) {
if (!QueryInst)
return MemDepResult::getClobber(SI);
if (SI->getOrdering() != Monotonic)
return MemDepResult::getClobber(SI);
if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
if (!QueryLI->isSimple())
return MemDepResult::getClobber(SI);
} else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
if (!QuerySI->isSimple())
return MemDepResult::getClobber(SI);
} else if (QueryInst->mayReadOrWriteMemory()) {
return MemDepResult::getClobber(SI);
}
}
// FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations; normal accesses, for example, can be
      // reordered with volatile accesses.
if (SI->isVolatile())
return MemDepResult::getClobber(SI);
// If alias analysis can tell that this store is guaranteed to not modify
// the query pointer, ignore it. Use getModRefInfo to handle cases where
// the query pointer points to constant memory etc.
if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
continue;
// Ok, this store might clobber the query pointer. Check to see if it is
// a must alias: in this case, we want to return this as a def.
MemoryLocation StoreLoc = MemoryLocation::get(SI);
// If we found a pointer, check if it could be the same as our pointer.
AliasResult R = AA->alias(StoreLoc, MemLoc);
if (R == NoAlias)
continue;
if (R == MustAlias)
return MemDepResult::getDef(Inst);
if (isInvariantLoad)
continue;
return MemDepResult::getClobber(Inst);
}
// If this is an allocation, and if we know that the accessed pointer is to
// the allocation, return Def. This means that there is no dependence and
// the access can be optimized based on that. For example, a load could
// turn into undef.
// Note: Only determine this to be a malloc if Inst is the malloc call, not
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
if (isInvariantLoad)
continue;
// Be conservative if the accessed pointer may alias the allocation.
if (AA->alias(Inst, AccessPtr) != NoAlias)
return MemDepResult::getClobber(Inst);
// If the allocation is not aliased and does not read memory (like
// strdup), it is safe to ignore.
if (isa<AllocaInst>(Inst) ||
isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
continue;
}
if (isInvariantLoad)
continue;
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
AliasAnalysis::ModRefResult MR = AA->getModRefInfo(Inst, MemLoc);
// If necessary, perform additional analysis.
if (MR == AliasAnalysis::ModRef)
MR = AA->callCapturesBefore(Inst, MemLoc, DT);
switch (MR) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
case AliasAnalysis::Mod:
return MemDepResult::getClobber(Inst);
case AliasAnalysis::Ref:
// If the call is known to never store to the pointer, and if this is a
// load query, we can safely ignore it (scan past it).
if (isLoad)
continue;
LLVM_FALLTHROUGH; // HLSL Change
default:
// Otherwise, there is a potential dependence. Return a clobber.
return MemDepResult::getClobber(Inst);
}
}
  // No dependence found. If this is the entry block of the function, the
  // result is non-func-local; otherwise it is non-local.
if (BB != &BB->getParent()->getEntryBlock())
return MemDepResult::getNonLocal();
return MemDepResult::getNonFuncLocal();
}
/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst, unsigned ScanLimit) {
Instruction *ScanPos = QueryInst;
// Check for a cached result
MemDepResult &LocalCache = LocalDeps[QueryInst];
// If the cached entry is non-dirty, just return it. Note that this depends
// on MemDepResult's default constructing to 'dirty'.
if (!LocalCache.isDirty())
return LocalCache;
// Otherwise, if we have a dirty entry, we know we can start the scan at that
// instruction, which may save us some work.
if (Instruction *Inst = LocalCache.getInst()) {
ScanPos = Inst;
RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
}
BasicBlock *QueryParent = QueryInst->getParent();
// Do the scan.
if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, the
    // result is non-func-local; otherwise it is non-local.
if (QueryParent != &QueryParent->getParent()->getEntryBlock())
LocalCache = MemDepResult::getNonLocal();
else
LocalCache = MemDepResult::getNonFuncLocal();
} else {
MemoryLocation MemLoc;
AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
if (MemLoc.Ptr) {
// If we can do a pointer scan, make it happen.
bool isLoad = !(MR & AliasAnalysis::Mod);
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
QueryParent, QueryInst, ScanLimit);
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
CallSite QueryCS(QueryInst);
bool isReadOnly = AA->onlyReadsMemory(QueryCS);
LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
QueryParent);
} else
// Non-memory instruction.
LocalCache = MemDepResult::getUnknown();
}
// Remember the result!
if (Instruction *I = LocalCache.getInst())
ReverseLocalDeps[I].insert(QueryInst);
return LocalCache;
}
#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
int Count = -1) {
if (Count == -1) Count = Cache.size();
if (Count == 0) return;
for (unsigned i = 1; i != unsigned(Count); ++i)
assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif
/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across. The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed. Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
"getNonLocalCallDependency should only be used on calls with non-local deps!");
PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
NonLocalDepInfo &Cache = CacheP.first;
/// DirtyBlocks - This is the set of blocks that need to be recomputed. In
/// the cached case, this can happen due to instructions being deleted etc. In
/// the uncached case, this starts out as the set of predecessors we care
/// about.
SmallVector<BasicBlock*, 32> DirtyBlocks;
if (!Cache.empty()) {
// Okay, we have a cache entry. If we know it is not dirty, just return it
// with no computation.
if (!CacheP.second) {
++NumCacheNonLocal;
return Cache;
}
// If we already have a partially computed set of results, scan them to
// determine what is dirty, seeding our initial DirtyBlocks worklist.
for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
I != E; ++I)
if (I->getResult().isDirty())
DirtyBlocks.push_back(I->getBB());
// Sort the cache so that we can do fast binary search lookups below.
std::sort(Cache.begin(), Cache.end());
++NumCacheDirtyNonLocal;
//cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
// << Cache.size() << " cached: " << *QueryInst;
} else {
// Seed DirtyBlocks with each of the preds of QueryInst's block.
BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
for (BasicBlock *Pred : PredCache.get(QueryBB))
DirtyBlocks.push_back(Pred);
++NumUncacheNonLocal;
}
// isReadonlyCall - If this is a read-only call, we can be more aggressive.
bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);
SmallPtrSet<BasicBlock*, 64> Visited;
unsigned NumSortedEntries = Cache.size();
DEBUG(AssertSorted(Cache));
// Iterate while we still have blocks to update.
while (!DirtyBlocks.empty()) {
BasicBlock *DirtyBB = DirtyBlocks.back();
DirtyBlocks.pop_back();
// Already processed this block?
if (!Visited.insert(DirtyBB).second)
continue;
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
DEBUG(AssertSorted(Cache, NumSortedEntries));
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
NonLocalDepEntry(DirtyBB));
if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
--Entry;
NonLocalDepEntry *ExistingResult = nullptr;
if (Entry != Cache.begin()+NumSortedEntries &&
Entry->getBB() == DirtyBB) {
// If we already have an entry, and if it isn't already dirty, the block
// is done.
if (!Entry->getResult().isDirty())
continue;
// Otherwise, remember this slot so we can update the value.
ExistingResult = &*Entry;
}
// If the dirty entry has a pointer, start scanning from it so we don't have
// to rescan the entire block.
BasicBlock::iterator ScanPos = DirtyBB->end();
if (ExistingResult) {
if (Instruction *Inst = ExistingResult->getResult().getInst()) {
ScanPos = Inst;
// We're removing QueryInst's use of Inst.
RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
QueryCS.getInstruction());
}
}
// Find out if this block has a local dependency for QueryInst.
MemDepResult Dep;
if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos,
                                      DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No local dependence was found in this block, so the value is live
      // across it; mark it non-local and keep searching the predecessors.
      Dep = MemDepResult::getNonLocal();
    } else {
      // We reached the entry block without finding a dependence; nothing
      // earlier in the function can provide one.
      Dep = MemDepResult::getNonFuncLocal();
    }
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
if (ExistingResult)
ExistingResult->setResult(Dep);
else
Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));
// If the block has a dependency (i.e. it isn't completely transparent to
// the value), remember the association!
if (!Dep.isNonLocal()) {
// Keep the ReverseNonLocalDeps map up to date so we can efficiently
// update this when we remove instructions.
if (Instruction *Inst = Dep.getInst())
ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
} else {
// If the block *is* completely transparent to the load, we need to check
// the predecessors of this block. Add them to our worklist.
for (BasicBlock *Pred : PredCache.get(DirtyBB))
DirtyBlocks.push_back(Pred);
}
}
return Cache;
}
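// Illustrative client usage (a sketch; MD, SomeCall and Visit are hypothetical
// names, not part of this file):
//
//   CallSite CS(SomeCall);
//   if (MD->getDependency(CS.getInstruction()).isNonLocal()) {
//     const MemoryDependenceAnalysis::NonLocalDepInfo &Deps =
//         MD->getNonLocalCallDependency(CS);
//     for (const NonLocalDepEntry &Entry : Deps)
//       Visit(Entry.getBB(), Entry.getResult());
//   }
//
// Per the contract above, the returned reference may be invalidated by the
// next non-local query, so copy the data if it must outlive that.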
/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(Instruction *QueryInst,
SmallVectorImpl<NonLocalDepResult> &Result) {
const MemoryLocation Loc = MemoryLocation::get(QueryInst);
bool isLoad = isa<LoadInst>(QueryInst);
BasicBlock *FromBB = QueryInst->getParent();
assert(FromBB);
assert(Loc.Ptr->getType()->isPointerTy() &&
"Can't get pointer deps of a non-pointer!");
Result.clear();
// This routine does not expect to deal with volatile instructions.
// Doing so would require piping through the QueryInst all the way through.
// TODO: volatiles can't be elided, but they can be reordered with other
// non-volatile accesses.
// We currently give up on any instruction which is ordered, but we do handle
// atomic instructions which are unordered.
// TODO: Handle ordered instructions
auto isOrdered = [](Instruction *Inst) {
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
return !LI->isUnordered();
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
return !SI->isUnordered();
}
return false;
};
if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
Result.push_back(NonLocalDepResult(FromBB,
MemDepResult::getUnknown(),
const_cast<Value *>(Loc.Ptr)));
return;
}
const DataLayout &DL = FromBB->getModule()->getDataLayout();
PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC);
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
// a block with multiple different pointers. This can happen during PHI
// translation.
DenseMap<BasicBlock*, Value*> Visited;
if (!getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
Result, Visited, true))
return;
Result.clear();
Result.push_back(NonLocalDepResult(FromBB,
MemDepResult::getUnknown(),
const_cast<Value *>(Loc.Ptr)));
}
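// Illustrative usage (a sketch; MD and Load are hypothetical names):
//
//   SmallVector<NonLocalDepResult, 8> Deps;
//   MD->getNonLocalPointerDependency(Load, Deps);
//   for (const NonLocalDepResult &R : Deps)
//     if (R.getResult().isDef())
//       ; // R.getResult().getInst() defines the memory reaching R.getBB().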
/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available). If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
NonLocalDepEntry(BB));
if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
--Entry;
NonLocalDepEntry *ExistingResult = nullptr;
if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
ExistingResult = &*Entry;
// If we have a cached entry, and it is non-dirty, use it as the value for
// this dependency.
if (ExistingResult && !ExistingResult->getResult().isDirty()) {
++NumCacheNonLocalPtr;
return ExistingResult->getResult();
}
// Otherwise, we have to scan for the value. If we have a dirty cache
// entry, start scanning from its position, otherwise we scan from the end
// of the block.
BasicBlock::iterator ScanPos = BB->end();
if (ExistingResult && ExistingResult->getResult().getInst()) {
assert(ExistingResult->getResult().getInst()->getParent() == BB &&
"Instruction invalidated?");
++NumCacheDirtyNonLocalPtr;
ScanPos = ExistingResult->getResult().getInst();
// Eliminating the dirty entry from 'Cache', so update the reverse info.
ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
} else {
++NumUncacheNonLocalPtr;
}
// Scan the block for the dependency.
MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
QueryInst);
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
if (ExistingResult)
ExistingResult->setResult(Dep);
else
Cache->push_back(NonLocalDepEntry(BB, Dep));
// If the block has a dependency (i.e. it isn't completely transparent to
// the value), remember the reverse association because we just added it
// to Cache!
if (!Dep.isDef() && !Dep.isClobber())
return Dep;
// Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
// update MemDep when we remove instructions.
Instruction *Inst = Dep.getInst();
assert(Inst && "Didn't depend on anything?");
ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
return Dep;
}
/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered. This is
/// optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
unsigned NumSortedEntries) {
switch (Cache.size() - NumSortedEntries) {
case 0:
// done, no new entries.
break;
case 2: {
// Two new entries, insert the last one into place.
NonLocalDepEntry Val = Cache.back();
Cache.pop_back();
MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end()-1, Val);
Cache.insert(Entry, Val);
// FALL THROUGH.
LLVM_FALLTHROUGH; // HLSL Change
}
case 1:
// One new entry, Just insert the new value at the appropriate position.
if (Cache.size() != 1) {
NonLocalDepEntry Val = Cache.back();
Cache.pop_back();
MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end(), Val);
Cache.insert(Entry, Val);
}
break;
default:
// Added many values, do a full scale sort.
std::sort(Cache.begin(), Cache.end());
break;
}
}
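// For example, with Cache = [A, C, E] already sorted (NumSortedEntries == 3)
// and a single appended entry D, the "case 1" path above binary-searches for
// D's position and performs one insert, producing [A, C, D, E] without paying
// for a full O(n log n) re-sort.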
/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::getNonLocalPointerDepFromBB(
Instruction *QueryInst, const PHITransAddr &Pointer,
const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
SmallVectorImpl<NonLocalDepResult> &Result,
DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock) {
// Look up the cached info for Pointer.
ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
// Set up a temporary NLPI value. If the map doesn't yet have an entry for
// CacheKey, this value will be inserted as the associated value. Otherwise,
// it'll be ignored, and we'll have to check to see if the cached size and
// aa tags are consistent with the current query.
NonLocalPointerInfo InitialNLPI;
InitialNLPI.Size = Loc.Size;
InitialNLPI.AATags = Loc.AATags;
// Get the NLPI for CacheKey, inserting one into the map if it doesn't
// already have one.
std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
NonLocalPointerInfo *CacheInfo = &Pair.first->second;
// If we already have a cache entry for this CacheKey, we may need to do some
// work to reconcile the cache entry and the current query.
if (!Pair.second) {
if (CacheInfo->Size < Loc.Size) {
// The query's Size is greater than the cached one. Throw out the
// cached data and proceed with the query at the greater size.
CacheInfo->Pair = BBSkipFirstBlockPair();
CacheInfo->Size = Loc.Size;
for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
if (Instruction *Inst = DI->getResult().getInst())
RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
CacheInfo->NonLocalDeps.clear();
} else if (CacheInfo->Size > Loc.Size) {
// This query's Size is less than the cached one. Conservatively restart
// the query using the greater size.
return getNonLocalPointerDepFromBB(QueryInst, Pointer,
Loc.getWithNewSize(CacheInfo->Size),
isLoad, StartBB, Result, Visited,
SkipFirstBlock);
}
// If the query's AATags are inconsistent with the cached one,
// conservatively throw out the cached data and restart the query with
// no tag if needed.
if (CacheInfo->AATags != Loc.AATags) {
if (CacheInfo->AATags) {
CacheInfo->Pair = BBSkipFirstBlockPair();
CacheInfo->AATags = AAMDNodes();
for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
if (Instruction *Inst = DI->getResult().getInst())
RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
CacheInfo->NonLocalDeps.clear();
}
if (Loc.AATags)
return getNonLocalPointerDepFromBB(QueryInst,
Pointer, Loc.getWithoutAATags(),
isLoad, StartBB, Result, Visited,
SkipFirstBlock);
}
}
NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
// If we have valid cached information for exactly the block we are
// investigating, just return it with no recomputation.
if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
if (!Visited.empty()) {
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
if (VI == Visited.end() || VI->second == Pointer.getAddr())
continue;
// We have a pointer mismatch in a block. Just return clobber, saying
// that something was clobbered in this result. We could also do a
// non-fully cached query, but there is little point in doing this.
return true;
}
}
Value *Addr = Pointer.getAddr();
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
Visited.insert(std::make_pair(I->getBB(), Addr));
if (I->getResult().isNonLocal()) {
continue;
}
if (!DT) {
Result.push_back(NonLocalDepResult(I->getBB(),
MemDepResult::getUnknown(),
Addr));
} else if (DT->isReachableFromEntry(I->getBB())) {
Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
}
}
++NumCacheCompleteNonLocalPtr;
return false;
}
// Otherwise, either this is a new block, a block with an invalid cache
// pointer or one that we're about to invalidate by putting more info into it
// than its valid cache info. If empty, the result will be valid cache info,
// otherwise it isn't.
if (Cache->empty())
CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
else
CacheInfo->Pair = BBSkipFirstBlockPair();
SmallVector<BasicBlock*, 32> Worklist;
Worklist.push_back(StartBB);
// PredList used inside loop.
SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;
// Keep track of the entries that we know are sorted. Previously cached
// entries will all be sorted. The entries we add we only sort on demand (we
// don't insert every element into its sorted position). We know that we
// won't get any reuse from currently inserted values, because we don't
// revisit blocks after we insert info for them.
unsigned NumSortedEntries = Cache->size();
DEBUG(AssertSorted(*Cache));
while (!Worklist.empty()) {
BasicBlock *BB = Worklist.pop_back_val();
    // Processing a very large number of blocks becomes too expensive to be
    // worth the extra precision, so give up once we pass the limit.
if (Result.size() > NumResultsLimit) {
Worklist.clear();
// Sort it now (if needed) so that recursive invocations of
// getNonLocalPointerDepFromBB and other routines that could reuse the
// cache value will only see properly sorted cache arrays.
if (Cache && NumSortedEntries != Cache->size()) {
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
}
// Since we bail out, the "Cache" set won't contain all of the
// results for the query. This is ok (we can still use it to accelerate
// specific block queries) but we can't do the fastpath "return all
// results from the set". Clear out the indicator for this.
CacheInfo->Pair = BBSkipFirstBlockPair();
return true;
}
// Skip the first block if we have it.
if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in BB.  See if we have already
      // been here.
assert(Visited.count(BB) && "Should check 'visited' before adding to WL");
// Get the dependency info for Pointer in BB. If we have cached
// information, we will use it, otherwise we compute it.
DEBUG(AssertSorted(*Cache, NumSortedEntries));
MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst,
Loc, isLoad, BB, Cache,
NumSortedEntries);
// If we got a Def or Clobber, add this to the list of results.
if (!Dep.isNonLocal()) {
if (!DT) {
Result.push_back(NonLocalDepResult(BB,
MemDepResult::getUnknown(),
Pointer.getAddr()));
continue;
} else if (DT->isReachableFromEntry(BB)) {
Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
continue;
}
}
}
// If 'Pointer' is an instruction defined in this block, then we need to do
// phi translation to change it into a value live in the predecessor block.
// If not, we just add the predecessors to the worklist and scan them with
// the same Pointer.
if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
SkipFirstBlock = false;
SmallVector<BasicBlock*, 16> NewBlocks;
for (BasicBlock *Pred : PredCache.get(BB)) {
// Verify that we haven't looked at this block yet.
std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
InsertRes = Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
if (InsertRes.second) {
        // First time we've looked at this predecessor.
NewBlocks.push_back(Pred);
continue;
}
// If we have seen this block before, but it was with a different
// pointer then we have a phi translation failure and we have to treat
// this as a clobber.
if (InsertRes.first->second != Pointer.getAddr()) {
// Make sure to clean up the Visited map before continuing on to
// PredTranslationFailure.
for (unsigned i = 0; i < NewBlocks.size(); i++)
Visited.erase(NewBlocks[i]);
goto PredTranslationFailure;
}
}
Worklist.append(NewBlocks.begin(), NewBlocks.end());
continue;
}
// We do need to do phi translation, if we know ahead of time we can't phi
// translate this value, don't even try.
if (!Pointer.IsPotentiallyPHITranslatable())
goto PredTranslationFailure;
// We may have added values to the cache list before this PHI translation.
// If so, we haven't done anything to ensure that the cache remains sorted.
// Sort it now (if needed) so that recursive invocations of
// getNonLocalPointerDepFromBB and other routines that could reuse the cache
// value will only see properly sorted cache arrays.
if (Cache && NumSortedEntries != Cache->size()) {
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
NumSortedEntries = Cache->size();
}
Cache = nullptr;
PredList.clear();
for (BasicBlock *Pred : PredCache.get(BB)) {
PredList.push_back(std::make_pair(Pred, Pointer));
// Get the PHI translated pointer in this predecessor. This can fail if
// not translatable, in which case the getAddr() returns null.
PHITransAddr &PredPointer = PredList.back().second;
PredPointer.PHITranslateValue(BB, Pred, DT, /*MustDominate=*/false);
Value *PredPtrVal = PredPointer.getAddr();
// Check to see if we have already visited this pred block with another
// pointer. If so, we can't do this lookup. This failure can occur
// with PHI translation when a critical edge exists and the PHI node in
// the successor translates to a pointer value different than the
// pointer the block was first analyzed with.
std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
if (!InsertRes.second) {
// We found the pred; take it off the list of preds to visit.
PredList.pop_back();
// If the predecessor was visited with PredPtr, then we already did
// the analysis and can ignore it.
if (InsertRes.first->second == PredPtrVal)
continue;
// Otherwise, the block was previously analyzed with a different
// pointer. We can't represent the result of this case, so we just
// treat this as a phi translation failure.
// Make sure to clean up the Visited map before continuing on to
// PredTranslationFailure.
for (unsigned i = 0, n = PredList.size(); i < n; ++i)
Visited.erase(PredList[i].first);
goto PredTranslationFailure;
}
}
    // Actually process results here; this needs to be a separate loop to avoid
// calling getNonLocalPointerDepFromBB for blocks we don't want to return
// any results for. (getNonLocalPointerDepFromBB will modify our
// datastructures in ways the code after the PredTranslationFailure label
// doesn't expect.)
for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
BasicBlock *Pred = PredList[i].first;
PHITransAddr &PredPointer = PredList[i].second;
Value *PredPtrVal = PredPointer.getAddr();
bool CanTranslate = true;
// If PHI translation was unable to find an available pointer in this
// predecessor, then we have to assume that the pointer is clobbered in
// that predecessor. We can still do PRE of the load, which would insert
// a computation of the pointer in this predecessor.
if (!PredPtrVal)
CanTranslate = false;
// FIXME: it is entirely possible that PHI translating will end up with
// the same value. Consider PHI translating something like:
// X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
// to recurse here, pedantically speaking.
// If getNonLocalPointerDepFromBB fails here, that means the cached
// result conflicted with the Visited list; we have to conservatively
// assume it is unknown, but this also does not block PRE of the load.
if (!CanTranslate ||
getNonLocalPointerDepFromBB(QueryInst, PredPointer,
Loc.getWithNewPtr(PredPtrVal),
isLoad, Pred,
Result, Visited)) {
// Add the entry to the Result list.
NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
Result.push_back(Entry);
// Since we had a phi translation failure, the cache for CacheKey won't
// include all of the entries that we need to immediately satisfy future
// queries. Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  Future reuses of the cached
        // value will then redo some work rather than miss the phi translation
        // failure.
NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
NLPI.Pair = BBSkipFirstBlockPair();
continue;
}
}
// Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
Cache = &CacheInfo->NonLocalDeps;
NumSortedEntries = Cache->size();
// Since we did phi translation, the "Cache" set won't contain all of the
// results for the query. This is ok (we can still use it to accelerate
// specific block queries) but we can't do the fastpath "return all
// results from the set" Clear out the indicator for this.
CacheInfo->Pair = BBSkipFirstBlockPair();
SkipFirstBlock = false;
continue;
PredTranslationFailure:
// The following code is "failure"; we can't produce a sane translation
// for the given block. It assumes that we haven't modified any of
// our datastructures while processing the current block.
if (!Cache) {
// Refresh the CacheInfo/Cache pointer if it got invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
Cache = &CacheInfo->NonLocalDeps;
NumSortedEntries = Cache->size();
}
// Since we failed phi translation, the "Cache" set won't contain all of the
// results for the query. This is ok (we can still use it to accelerate
// specific block queries) but we can't do the fastpath "return all
// results from the set". Clear out the indicator for this.
CacheInfo->Pair = BBSkipFirstBlockPair();
// If *nothing* works, mark the pointer as unknown.
//
// If this is the magic first block, return this as a clobber of the whole
// incoming value. Since we can't phi translate to one of the predecessors,
// we have to bail out.
if (SkipFirstBlock)
return true;
for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
assert(I != Cache->rend() && "Didn't find current block??");
if (I->getBB() != BB)
continue;
assert((I->getResult().isNonLocal() || !DT->isReachableFromEntry(BB)) &&
"Should only be here with transparent block");
I->setResult(MemDepResult::getUnknown());
Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
Pointer.getAddr()));
break;
}
}
// Okay, we're done now. If we added new values to the cache, re-sort it.
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
DEBUG(AssertSorted(*Cache));
return false;
}
/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
CachedNonLocalPointerInfo::iterator It =
NonLocalPointerDeps.find(P);
if (It == NonLocalPointerDeps.end()) return;
// Remove all of the entries in the BB->val map. This involves removing
// instructions from the reverse map.
NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Instruction *Target = PInfo[i].getResult().getInst();
if (!Target) continue; // Ignore non-local dep results.
assert(Target->getParent() == PInfo[i].getBB());
// Eliminating the dirty entry from 'Cache', so update the reverse info.
RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
}
// Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
NonLocalPointerDeps.erase(It);
}
/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep. This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with ptr. This can make Ptr available
/// in more places that cached info does not necessarily keep.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
// If Ptr isn't really a pointer, just ignore it.
if (!Ptr->getType()->isPointerTy()) return;
// Flush store info for the pointer.
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
// Flush load info for the pointer.
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
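// Illustrative use (a sketch): after a client proves two values equal and
// performs the replacement, it should drop the now-stale cached info, e.g.:
//
//   OtherVal->replaceAllUsesWith(Ptr);
//   MD->invalidateCachedPointerInfo(Ptr);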
/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
PredCache.clear();
}
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
// Walk through the Non-local dependencies, removing this one as the value
// for any cached queries.
NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
if (NLDI != NonLocalDeps.end()) {
NonLocalDepInfo &BlockMap = NLDI->second.first;
for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
DI != DE; ++DI)
if (Instruction *Inst = DI->getResult().getInst())
RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
NonLocalDeps.erase(NLDI);
}
// If we have a cached local dependence query for this instruction, remove it.
//
LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
if (LocalDepEntry != LocalDeps.end()) {
// Remove us from DepInst's reverse set now that the local dep info is gone.
if (Instruction *Inst = LocalDepEntry->second.getInst())
RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
// Remove this local dependency info.
LocalDeps.erase(LocalDepEntry);
}
// If we have any cached pointer dependencies on this instruction, remove
// them. If the instruction has non-pointer type, then it can't be a pointer
// base.
// Remove it from both the load info and the store info. The instruction
// can't be in either of these maps if it is non-pointer.
if (RemInst->getType()->isPointerTy()) {
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
}
// Loop over all of the things that depend on the instruction we're removing.
//
SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;
// If we find RemInst as a clobber or Def in any of the maps for other values,
// we need to replace its entry with a dirty version of the instruction after
// it. If RemInst is a terminator, we use a null dirty value.
//
// Using a dirty version of the instruction after RemInst saves having to scan
// the entire block to get to this point.
MemDepResult NewDirtyVal;
if (!RemInst->isTerminator())
NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
if (ReverseDepIt != ReverseLocalDeps.end()) {
// RemInst can't be the terminator if it has local stuff depending on it.
assert(!ReverseDepIt->second.empty() && !isa<TerminatorInst>(RemInst) &&
"Nothing can locally depend on a terminator");
for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
assert(InstDependingOnRemInst != RemInst &&
"Already removed our local dep info");
LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
// Make sure to remember that new things depend on NewDepInst.
assert(NewDirtyVal.getInst() && "There is no way something else can have "
"a local dep on this if it is a terminator!");
ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
InstDependingOnRemInst));
}
ReverseLocalDeps.erase(ReverseDepIt);
// Add new reverse deps after scanning the set, to avoid invalidating the
// 'ReverseDeps' reference.
while (!ReverseDepsToAdd.empty()) {
ReverseLocalDeps[ReverseDepsToAdd.back().first]
.insert(ReverseDepsToAdd.back().second);
ReverseDepsToAdd.pop_back();
}
}
ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
if (ReverseDepIt != ReverseNonLocalDeps.end()) {
for (Instruction *I : ReverseDepIt->second) {
assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
PerInstNLInfo &INLD = NonLocalDeps[I];
// The information is now dirty!
INLD.second = true;
for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
DE = INLD.first.end(); DI != DE; ++DI) {
if (DI->getResult().getInst() != RemInst) continue;
// Convert to a dirty entry for the subsequent instruction.
DI->setResult(NewDirtyVal);
if (Instruction *NextI = NewDirtyVal.getInst())
ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
}
}
ReverseNonLocalDeps.erase(ReverseDepIt);
// Add new reverse deps after scanning the set, to avoid invalidating 'Set'
while (!ReverseDepsToAdd.empty()) {
ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
.insert(ReverseDepsToAdd.back().second);
ReverseDepsToAdd.pop_back();
}
}
// If the instruction is in ReverseNonLocalPtrDeps then it appears as a
// value in the NonLocalPointerDeps info.
ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
ReverseNonLocalPtrDeps.find(RemInst);
if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;
for (ValueIsLoadPair P : ReversePtrDepIt->second) {
assert(P.getPointer() != RemInst &&
"Already removed NonLocalPointerDeps info for RemInst");
NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
// The cache is not valid for any specific block anymore.
NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
// Update any entries for RemInst to use the instruction after it.
for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
DI != DE; ++DI) {
if (DI->getResult().getInst() != RemInst) continue;
// Convert to a dirty entry for the subsequent instruction.
DI->setResult(NewDirtyVal);
if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
}
// Re-sort the NonLocalDepInfo. Changing the dirty entry to its
// subsequent value may invalidate the sortedness.
std::sort(NLPDI.begin(), NLPDI.end());
}
ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
while (!ReversePtrDepsToAdd.empty()) {
ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
.insert(ReversePtrDepsToAdd.back().second);
ReversePtrDepsToAdd.pop_back();
}
}
assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
AA->deleteValue(RemInst);
DEBUG(verifyRemoved(RemInst));
}
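// Illustrative deletion pattern for clients (a sketch):
//
//   MD->removeInstruction(Inst);  // keep the memdep caches coherent first...
//   Inst->eraseFromParent();      // ...then actually delete the instruction.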
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures. This function verifies by asserting in
/// debug builds.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
E = LocalDeps.end(); I != E; ++I) {
assert(I->first != D && "Inst occurs in data structures");
assert(I->second.getInst() != D &&
"Inst occurs in data structures");
}
for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
E = NonLocalPointerDeps.end(); I != E; ++I) {
assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
const NonLocalDepInfo &Val = I->second.NonLocalDeps;
for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
II != E; ++II)
assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
}
for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
E = NonLocalDeps.end(); I != E; ++I) {
assert(I->first != D && "Inst occurs in data structures");
const PerInstNLInfo &INLD = I->second;
for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
EE = INLD.first.end(); II != EE; ++II)
assert(II->getResult().getInst() != D && "Inst occurs in data structures");
}
for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
E = ReverseLocalDeps.end(); I != E; ++I) {
assert(I->first != D && "Inst occurs in data structures");
for (Instruction *Inst : I->second)
assert(Inst != D && "Inst occurs in data structures");
}
for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
E = ReverseNonLocalDeps.end();
I != E; ++I) {
assert(I->first != D && "Inst occurs in data structures");
for (Instruction *Inst : I->second)
assert(Inst != D && "Inst occurs in data structures");
}
for (ReverseNonLocalPtrDepTy::const_iterator
I = ReverseNonLocalPtrDeps.begin(),
E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
assert(I->first != D && "Inst occurs in rev NLPD map");
for (ValueIsLoadPair P : I->second)
assert(P != ValueIsLoadPair(D, false) &&
P != ValueIsLoadPair(D, true) &&
"Inst occurs in ReverseNonLocalPtrDeps map");
}
#endif
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/RegionPrinter.cpp | //===- RegionPrinter.cpp - Print regions tree pass ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Print out the region tree of a function using dotty/graphviz.
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/RegionPrinter.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DOTGraphTraitsPass.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
//===----------------------------------------------------------------------===//
/// onlySimpleRegions - Show only the simple regions in the RegionViewer.
static cl::opt<bool> onlySimpleRegions(
"only-simple-regions",
cl::desc("Show only simple regions in the graphviz viewer"), cl::Hidden,
cl::init(false));
namespace llvm {
template <> struct DOTGraphTraits<RegionNode *> : public DefaultDOTGraphTraits {
DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
std::string getNodeLabel(RegionNode *Node, RegionNode *Graph) {
if (!Node->isSubRegion()) {
BasicBlock *BB = Node->getNodeAs<BasicBlock>();
if (isSimple())
return DOTGraphTraits<const Function *>::getSimpleNodeLabel(
BB, BB->getParent());
else
return DOTGraphTraits<const Function *>::getCompleteNodeLabel(
BB, BB->getParent());
}
return "Not implemented";
}
};
template <>
struct DOTGraphTraits<RegionInfoPass *> : public DOTGraphTraits<RegionNode *> {
DOTGraphTraits(bool isSimple = false)
: DOTGraphTraits<RegionNode *>(isSimple) {}
static std::string getGraphName(RegionInfoPass *DT) { return "Region Graph"; }
std::string getNodeLabel(RegionNode *Node, RegionInfoPass *G) {
RegionInfo &RI = G->getRegionInfo();
return DOTGraphTraits<RegionNode *>::getNodeLabel(
Node, reinterpret_cast<RegionNode *>(RI.getTopLevelRegion()));
}
std::string getEdgeAttributes(RegionNode *srcNode,
GraphTraits<RegionInfo *>::ChildIteratorType CI,
RegionInfoPass *G) {
RegionInfo &RI = G->getRegionInfo();
RegionNode *destNode = *CI;
if (srcNode->isSubRegion() || destNode->isSubRegion())
return "";
// In case of a backedge, do not use it to define the layout of the nodes.
BasicBlock *srcBB = srcNode->getNodeAs<BasicBlock>();
BasicBlock *destBB = destNode->getNodeAs<BasicBlock>();
Region *R = RI.getRegionFor(destBB);
while (R && R->getParent())
if (R->getParent()->getEntry() == destBB)
R = R->getParent();
else
break;
if (R && R->getEntry() == destBB &&
R->contains(srcBB)) // HLSL Change - add null check
return "constraint=false";
return "";
}
// Print the cluster of the subregions. This groups the single basic blocks
// and adds a different background color for each group.
static void printRegionCluster(const Region &R,
GraphWriter<RegionInfoPass *> &GW,
unsigned depth = 0) {
raw_ostream &O = GW.getOStream();
O.indent(2 * depth) << "subgraph cluster_" << static_cast<const void *>(&R)
<< " {\n";
O.indent(2 * (depth + 1)) << "label = \"\";\n";
if (!onlySimpleRegions || R.isSimple()) {
O.indent(2 * (depth + 1)) << "style = filled;\n";
O.indent(2 * (depth + 1))
<< "color = " << ((R.getDepth() * 2 % 12) + 1) << "\n";
} else {
O.indent(2 * (depth + 1)) << "style = solid;\n";
O.indent(2 * (depth + 1))
<< "color = " << ((R.getDepth() * 2 % 12) + 2) << "\n";
}
for (Region::const_iterator RI = R.begin(), RE = R.end(); RI != RE; ++RI)
printRegionCluster(**RI, GW, depth + 1);
const RegionInfo &RI = *static_cast<const RegionInfo *>(R.getRegionInfo());
for (auto *BB : R.blocks())
if (RI.getRegionFor(BB) == &R)
O.indent(2 * (depth + 1))
<< "Node"
<< static_cast<const void *>(RI.getTopLevelRegion()->getBBNode(BB))
<< ";\n";
O.indent(2 * depth) << "}\n";
}
static void addCustomGraphFeatures(const RegionInfoPass *RIP,
GraphWriter<RegionInfoPass *> &GW) {
const RegionInfo &RI = RIP->getRegionInfo();
raw_ostream &O = GW.getOStream();
O << "\tcolorscheme = \"paired12\"\n";
printRegionCluster(*RI.getTopLevelRegion(), GW, 4);
}
};
} // end namespace llvm
namespace {
struct RegionViewer : public DOTGraphTraitsViewer<RegionInfoPass, false> {
static char ID;
RegionViewer() : DOTGraphTraitsViewer<RegionInfoPass, false>("reg", ID) {
initializeRegionViewerPass(*PassRegistry::getPassRegistry());
}
};
char RegionViewer::ID = 0;
struct RegionOnlyViewer : public DOTGraphTraitsViewer<RegionInfoPass, true> {
static char ID;
RegionOnlyViewer()
: DOTGraphTraitsViewer<RegionInfoPass, true>("regonly", ID) {
initializeRegionOnlyViewerPass(*PassRegistry::getPassRegistry());
}
};
char RegionOnlyViewer::ID = 0;
struct RegionPrinter : public DOTGraphTraitsPrinter<RegionInfoPass, false> {
static char ID;
RegionPrinter() : DOTGraphTraitsPrinter<RegionInfoPass, false>("reg", ID) {
initializeRegionPrinterPass(*PassRegistry::getPassRegistry());
}
};
char RegionPrinter::ID = 0;
} // end anonymous namespace
INITIALIZE_PASS(RegionPrinter, "dot-regions",
"Print regions of function to 'dot' file", true, true)
INITIALIZE_PASS(RegionViewer, "view-regions", "View regions of function", true,
true)
INITIALIZE_PASS(RegionOnlyViewer, "view-regions-only",
"View regions of function (with no function bodies)", true,
true)
namespace {
struct RegionOnlyPrinter : public DOTGraphTraitsPrinter<RegionInfoPass, true> {
static char ID;
RegionOnlyPrinter() : DOTGraphTraitsPrinter<RegionInfoPass, true>("reg", ID) {
initializeRegionOnlyPrinterPass(*PassRegistry::getPassRegistry());
}
};
} // namespace
char RegionOnlyPrinter::ID = 0;
INITIALIZE_PASS(RegionOnlyPrinter, "dot-regions-only",
"Print regions of function to 'dot' file "
"(with no function bodies)",
true, true)
FunctionPass *llvm::createRegionViewerPass() { return new RegionViewer(); }
FunctionPass *llvm::createRegionOnlyViewerPass() {
return new RegionOnlyViewer();
}
FunctionPass *llvm::createRegionPrinterPass() { return new RegionPrinter(); }
FunctionPass *llvm::createRegionOnlyPrinterPass() {
return new RegionOnlyPrinter();
}
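// Illustrative invocation (a sketch): "opt -dot-regions input.ll
// -disable-output" writes one reg.<function>.dot file per function for
// dotty/graphviz, while -view-regions / -view-regions-only open the viewer
// directly.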
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/BranchProbabilityInfo.cpp | //===-- BranchProbabilityInfo.cpp - Branch Probability Analysis -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "branch-prob"
INITIALIZE_PASS_BEGIN(BranchProbabilityInfo, "branch-prob",
"Branch Probability Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(BranchProbabilityInfo, "branch-prob",
"Branch Probability Analysis", false, true)
char BranchProbabilityInfo::ID = 0;
// Weights are for internal use only.  They are used by heuristics to help
// estimate edge probabilities.  Example:
//
// Using "Loop Branch Heuristics" we predict weights of edges for the
// block BB2.
// ...
// |
// V
// BB1<-+
// | |
// | | (Weight = 124)
// V |
// BB2--+
// |
// | (Weight = 4)
// V
// BB3
//
// Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
// Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
static const uint32_t LBH_TAKEN_WEIGHT = 124;
static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
/// \brief Unreachable-terminating branch taken weight.
///
/// This is the weight for a branch being taken to a block that terminates
/// (eventually) in unreachable. These are predicted as unlikely as possible.
static const uint32_t UR_TAKEN_WEIGHT = 1;
/// \brief Unreachable-terminating branch not-taken weight.
///
/// This is the weight for a branch not being taken toward a block that
/// terminates (eventually) in unreachable. Such a branch is essentially never
/// taken. Set the weight to an absurdly high value so that nested loops don't
/// easily subsume it.
static const uint32_t UR_NONTAKEN_WEIGHT = 1024*1024 - 1;
/// \brief Weight for a branch taken going into a cold block.
///
/// This is the weight for a branch taken toward a block marked
/// cold. A block is marked cold if it's postdominated by a
/// block containing a call to a cold function. Cold functions
/// are those marked with attribute 'cold'.
static const uint32_t CC_TAKEN_WEIGHT = 4;
/// \brief Weight for a branch not-taken into a cold block.
///
/// This is the weight for a branch not taken toward a block marked
/// cold.
static const uint32_t CC_NONTAKEN_WEIGHT = 64;
static const uint32_t PH_TAKEN_WEIGHT = 20;
static const uint32_t PH_NONTAKEN_WEIGHT = 12;
static const uint32_t ZH_TAKEN_WEIGHT = 20;
static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;
/// \brief Invoke-terminating normal branch taken weight
///
/// This is the weight for branching to the normal destination of an invoke
/// instruction. We expect this to happen most of the time. Set the weight to an
/// absurdly high value so that nested loops subsume it.
static const uint32_t IH_TAKEN_WEIGHT = 1024 * 1024 - 1;
/// \brief Invoke-terminating normal branch not-taken weight.
///
/// This is the weight for branching to the unwind destination of an invoke
/// instruction. This is essentially never taken.
static const uint32_t IH_NONTAKEN_WEIGHT = 1;
// Standard weight value. Used when none of the heuristics set weight for
// the edge.
static const uint32_t NORMAL_WEIGHT = 16;
// Minimum weight of an edge. Please note, that weight is NEVER 0.
static const uint32_t MIN_WEIGHT = 1;
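// For example, a two-successor branch given the PH_* weights above is read
// back by getEdgeProbability() as 20/(20+12) = 62.5% taken and
// 12/(20+12) = 37.5% not taken.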
/// \brief Calculate edge weights for successors that lead to unreachable.
///
/// Predict a successor which necessarily leads to an unreachable-terminated
/// block as extremely unlikely.
bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) {
TerminatorInst *TI = BB->getTerminator();
if (TI->getNumSuccessors() == 0) {
if (isa<UnreachableInst>(TI))
PostDominatedByUnreachable.insert(BB);
return false;
}
SmallVector<unsigned, 4> UnreachableEdges;
SmallVector<unsigned, 4> ReachableEdges;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (PostDominatedByUnreachable.count(*I))
UnreachableEdges.push_back(I.getSuccessorIndex());
else
ReachableEdges.push_back(I.getSuccessorIndex());
}
// If all successors are in the set of blocks post-dominated by unreachable,
// this block is too.
if (UnreachableEdges.size() == TI->getNumSuccessors())
PostDominatedByUnreachable.insert(BB);
// Skip probabilities if this block has a single successor or if all were
// reachable.
if (TI->getNumSuccessors() == 1 || UnreachableEdges.empty())
return false;
uint32_t UnreachableWeight =
std::max(UR_TAKEN_WEIGHT / (unsigned)UnreachableEdges.size(), MIN_WEIGHT);
for (SmallVectorImpl<unsigned>::iterator I = UnreachableEdges.begin(),
E = UnreachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, UnreachableWeight);
if (ReachableEdges.empty())
return true;
uint32_t ReachableWeight =
std::max(UR_NONTAKEN_WEIGHT / (unsigned)ReachableEdges.size(),
NORMAL_WEIGHT);
for (SmallVectorImpl<unsigned>::iterator I = ReachableEdges.begin(),
E = ReachableEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, ReachableWeight);
return true;
}
// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing.
bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) {
TerminatorInst *TI = BB->getTerminator();
if (TI->getNumSuccessors() == 1)
return false;
if (!isa<BranchInst>(TI) && !isa<SwitchInst>(TI))
return false;
MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
if (!WeightsNode)
return false;
// Check that the number of successors is manageable.
assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");
// Ensure there are weights for all of the successors. Note that the first
// operand to the metadata node is a name, not a weight.
if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
return false;
// Build up the final weights that will be used in a temporary buffer.
// Compute the sum of all weights to later decide whether they need to
// be scaled to fit in 32 bits.
uint64_t WeightSum = 0;
SmallVector<uint32_t, 2> Weights;
Weights.reserve(TI->getNumSuccessors());
for (unsigned i = 1, e = WeightsNode->getNumOperands(); i != e; ++i) {
ConstantInt *Weight =
mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(i));
if (!Weight)
return false;
assert(Weight->getValue().getActiveBits() <= 32 &&
"Too many bits for uint32_t");
Weights.push_back(Weight->getZExtValue());
WeightSum += Weights.back();
}
assert(Weights.size() == TI->getNumSuccessors() && "Checked above");
// If the sum of weights does not fit in 32 bits, scale every weight down
// accordingly.
uint64_t ScalingFactor =
(WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;
WeightSum = 0;
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
uint32_t W = Weights[i] / ScalingFactor;
WeightSum += W;
setEdgeWeight(BB, i, W);
}
assert(WeightSum <= UINT32_MAX &&
"Expected weights to scale down to 32 bits");
return true;
}
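// Illustrative !prof metadata consumed above (a sketch):
//
//   br i1 %cmp, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 64, i32 4}
//
// Operand 0 is the "branch_weights" name string, so operand i+1 supplies the
// weight of successor i.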
/// \brief Calculate edge weights for edges leading to cold blocks.
///
/// A cold block is one post-dominated by a block with a call to a
/// cold function. Those edges are unlikely to be taken, so we give
/// them relatively low weight.
///
/// Return true if we could compute the weights for cold edges.
/// Return false, otherwise.
bool BranchProbabilityInfo::calcColdCallHeuristics(BasicBlock *BB) {
TerminatorInst *TI = BB->getTerminator();
if (TI->getNumSuccessors() == 0)
return false;
// Determine which successors are post-dominated by a cold block.
SmallVector<unsigned, 4> ColdEdges;
SmallVector<unsigned, 4> NormalEdges;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I)
if (PostDominatedByColdCall.count(*I))
ColdEdges.push_back(I.getSuccessorIndex());
else
NormalEdges.push_back(I.getSuccessorIndex());
// If all successors are in the set of blocks post-dominated by cold calls,
// this block is in the set post-dominated by cold calls.
if (ColdEdges.size() == TI->getNumSuccessors())
PostDominatedByColdCall.insert(BB);
else {
// Otherwise, if the block itself contains a cold function, add it to the
// set of blocks postdominated by a cold call.
assert(!PostDominatedByColdCall.count(BB));
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (CallInst *CI = dyn_cast<CallInst>(I))
if (CI->hasFnAttr(Attribute::Cold)) {
PostDominatedByColdCall.insert(BB);
break;
}
}
  // Skip probabilities if this block has a single successor or if no edges
  // lead to a cold block.
if (TI->getNumSuccessors() == 1 || ColdEdges.empty())
return false;
uint32_t ColdWeight =
std::max(CC_TAKEN_WEIGHT / (unsigned) ColdEdges.size(), MIN_WEIGHT);
for (SmallVectorImpl<unsigned>::iterator I = ColdEdges.begin(),
E = ColdEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, ColdWeight);
if (NormalEdges.empty())
return true;
uint32_t NormalWeight = std::max(
CC_NONTAKEN_WEIGHT / (unsigned) NormalEdges.size(), NORMAL_WEIGHT);
for (SmallVectorImpl<unsigned>::iterator I = NormalEdges.begin(),
E = NormalEdges.end();
I != E; ++I)
setEdgeWeight(BB, *I, NormalWeight);
return true;
}
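// Illustrative IR that triggers this heuristic (a sketch):
//
//   call void @fatal_error()    ; call site carries the 'cold' attribute
//
// The containing block, and every block post-dominated by it, is treated as
// cold, so edges into that region get CC_TAKEN_WEIGHT.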
// Calculate Edge Weights using "Pointer Heuristics". Predict a comparsion
// between two pointer or pointer and NULL will fail.
bool BranchProbabilityInfo::calcPointerHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
Value *Cond = BI->getCondition();
ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
if (!CI || !CI->isEquality())
return false;
Value *LHS = CI->getOperand(0);
if (!LHS->getType()->isPointerTy())
return false;
assert(CI->getOperand(1)->getType()->isPointerTy());
// p != 0 -> isProb = true
// p == 0 -> isProb = false
// p != q -> isProb = true
// p == q -> isProb = false;
unsigned TakenIdx = 0, NonTakenIdx = 1;
bool isProb = CI->getPredicate() == ICmpInst::ICMP_NE;
if (!isProb)
std::swap(TakenIdx, NonTakenIdx);
setEdgeWeight(BB, TakenIdx, PH_TAKEN_WEIGHT);
setEdgeWeight(BB, NonTakenIdx, PH_NONTAKEN_WEIGHT);
return true;
}
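// Example: for "if (p != nullptr)" the true edge receives PH_TAKEN_WEIGHT (20)
// and the false edge PH_NONTAKEN_WEIGHT (12), i.e. roughly 62.5% taken.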
// Calculate Edge Weights using "Loop Branch Heuristics". Predict backedges
// as taken, exiting edges as not-taken.
bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) {
Loop *L = LI->getLoopFor(BB);
if (!L)
return false;
SmallVector<unsigned, 8> BackEdges;
SmallVector<unsigned, 8> ExitingEdges;
SmallVector<unsigned, 8> InEdges; // Edges from header to the loop.
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
if (!L->contains(*I))
ExitingEdges.push_back(I.getSuccessorIndex());
else if (L->getHeader() == *I)
BackEdges.push_back(I.getSuccessorIndex());
else
InEdges.push_back(I.getSuccessorIndex());
}
if (BackEdges.empty() && ExitingEdges.empty())
return false;
if (uint32_t numBackEdges = BackEdges.size()) {
uint32_t backWeight = LBH_TAKEN_WEIGHT / numBackEdges;
if (backWeight < NORMAL_WEIGHT)
backWeight = NORMAL_WEIGHT;
for (SmallVectorImpl<unsigned>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
setEdgeWeight(BB, *EI, backWeight);
}
}
if (uint32_t numInEdges = InEdges.size()) {
uint32_t inWeight = LBH_TAKEN_WEIGHT / numInEdges;
if (inWeight < NORMAL_WEIGHT)
inWeight = NORMAL_WEIGHT;
for (SmallVectorImpl<unsigned>::iterator EI = InEdges.begin(),
EE = InEdges.end(); EI != EE; ++EI) {
setEdgeWeight(BB, *EI, inWeight);
}
}
if (uint32_t numExitingEdges = ExitingEdges.size()) {
uint32_t exitWeight = LBH_NONTAKEN_WEIGHT / numExitingEdges;
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
for (SmallVectorImpl<unsigned>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
setEdgeWeight(BB, *EI, exitWeight);
}
}
return true;
}
bool BranchProbabilityInfo::calcZeroHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
Value *Cond = BI->getCondition();
ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
if (!CI)
return false;
Value *RHS = CI->getOperand(1);
ConstantInt *CV = dyn_cast<ConstantInt>(RHS);
if (!CV)
return false;
// If the LHS is the result of AND'ing a value with a single bit bitmask,
// we don't have information about probabilities.
if (Instruction *LHS = dyn_cast<Instruction>(CI->getOperand(0)))
if (LHS->getOpcode() == Instruction::And)
if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(LHS->getOperand(1)))
if (AndRHS->getUniqueInteger().isPowerOf2())
return false;
bool isProb;
if (CV->isZero()) {
switch (CI->getPredicate()) {
case CmpInst::ICMP_EQ:
// X == 0 -> Unlikely
isProb = false;
break;
case CmpInst::ICMP_NE:
// X != 0 -> Likely
isProb = true;
break;
case CmpInst::ICMP_SLT:
// X < 0 -> Unlikely
isProb = false;
break;
case CmpInst::ICMP_SGT:
// X > 0 -> Likely
isProb = true;
break;
default:
return false;
}
} else if (CV->isOne() && CI->getPredicate() == CmpInst::ICMP_SLT) {
// InstCombine canonicalizes X <= 0 into X < 1.
// X <= 0 -> Unlikely
isProb = false;
} else if (CV->isAllOnesValue()) {
switch (CI->getPredicate()) {
case CmpInst::ICMP_EQ:
// X == -1 -> Unlikely
isProb = false;
break;
case CmpInst::ICMP_NE:
// X != -1 -> Likely
isProb = true;
break;
case CmpInst::ICMP_SGT:
// InstCombine canonicalizes X >= 0 into X > -1.
// X >= 0 -> Likely
isProb = true;
break;
default:
return false;
}
} else {
return false;
}
unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
std::swap(TakenIdx, NonTakenIdx);
setEdgeWeight(BB, TakenIdx, ZH_TAKEN_WEIGHT);
setEdgeWeight(BB, NonTakenIdx, ZH_NONTAKEN_WEIGHT);
return true;
}
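// Example of the single-bit bailout above: "if ((x & 8) == 0)" tests one bit
// of a bitmask, which carries no statistical bias either way, so no weights
// are set and the remaining heuristics get a chance to run.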
bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) {
BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return false;
Value *Cond = BI->getCondition();
FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
if (!FCmp)
return false;
bool isProb;
if (FCmp->isEquality()) {
// f1 == f2 -> Unlikely
// f1 != f2 -> Likely
isProb = !FCmp->isTrueWhenEqual();
} else if (FCmp->getPredicate() == FCmpInst::FCMP_ORD) {
// !isnan -> Likely
isProb = true;
} else if (FCmp->getPredicate() == FCmpInst::FCMP_UNO) {
// isnan -> Unlikely
isProb = false;
} else {
return false;
}
unsigned TakenIdx = 0, NonTakenIdx = 1;
if (!isProb)
std::swap(TakenIdx, NonTakenIdx);
setEdgeWeight(BB, TakenIdx, FPH_TAKEN_WEIGHT);
setEdgeWeight(BB, NonTakenIdx, FPH_NONTAKEN_WEIGHT);
return true;
}
bool BranchProbabilityInfo::calcInvokeHeuristics(BasicBlock *BB) {
InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator());
if (!II)
return false;
setEdgeWeight(BB, 0/*Index for Normal*/, IH_TAKEN_WEIGHT);
setEdgeWeight(BB, 1/*Index for Unwind*/, IH_NONTAKEN_WEIGHT);
return true;
}
void BranchProbabilityInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LoopInfoWrapperPass>();
AU.setPreservesAll();
}
bool BranchProbabilityInfo::runOnFunction(Function &F) {
DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
<< " ----\n\n");
LastF = &F; // Store the last function we ran on for printing.
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
assert(PostDominatedByUnreachable.empty());
assert(PostDominatedByColdCall.empty());
// Walk the basic blocks in post-order so that we can build up state about
// the successors of a block iteratively.
for (auto BB : post_order(&F.getEntryBlock())) {
DEBUG(dbgs() << "Computing probabilities for " << BB->getName() << "\n");
if (calcUnreachableHeuristics(BB))
continue;
if (calcMetadataWeights(BB))
continue;
if (calcColdCallHeuristics(BB))
continue;
if (calcLoopBranchHeuristics(BB))
continue;
if (calcPointerHeuristics(BB))
continue;
if (calcZeroHeuristics(BB))
continue;
if (calcFloatingPointHeuristics(BB))
continue;
calcInvokeHeuristics(BB);
}
PostDominatedByUnreachable.clear();
PostDominatedByColdCall.clear();
return false;
}
void BranchProbabilityInfo::releaseMemory() {
Weights.clear();
}
void BranchProbabilityInfo::print(raw_ostream &OS, const Module *) const {
OS << "---- Branch Probabilities ----\n";
// We print the probabilities from the last function the analysis ran over,
// or the function it is currently running over.
assert(LastF && "Cannot print prior to running over a function");
for (Function::const_iterator BI = LastF->begin(), BE = LastF->end();
BI != BE; ++BI) {
for (succ_const_iterator SI = succ_begin(BI), SE = succ_end(BI);
SI != SE; ++SI) {
printEdgeProbability(OS << " ", BI, *SI);
}
}
}
uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const {
uint32_t Sum = 0;
for (succ_const_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
uint32_t Weight = getEdgeWeight(BB, I.getSuccessorIndex());
uint32_t PrevSum = Sum;
Sum += Weight;
assert(Sum >= PrevSum); (void) PrevSum;
}
return Sum;
}
bool BranchProbabilityInfo::
isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
// FIXME: Compare against a static "hot" BranchProbability.
return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}
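// Example: with the loop weights above, a backedge of weight 124 against an
// exit edge of weight 4 has probability 124/128 ~= 96.9% > 80%, so it is
// reported as hot.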
BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
uint32_t Sum = 0;
uint32_t MaxWeight = 0;
BasicBlock *MaxSucc = nullptr;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
uint32_t Weight = getEdgeWeight(BB, Succ);
uint32_t PrevSum = Sum;
Sum += Weight;
assert(Sum > PrevSum); (void) PrevSum;
if (Weight > MaxWeight) {
MaxWeight = Weight;
MaxSucc = Succ;
}
}
// Hot probability is at least 4/5 = 80%
if (BranchProbability(MaxWeight, Sum) > BranchProbability(4, 5))
return MaxSucc;
return nullptr;
}
/// Get the raw edge weight for the edge. If it can't be found, return the
/// DEFAULT_WEIGHT value. Here an edge is specified using PredBlock and an
/// index into its successors.
uint32_t BranchProbabilityInfo::
getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const {
DenseMap<Edge, uint32_t>::const_iterator I =
Weights.find(std::make_pair(Src, IndexInSuccessors));
if (I != Weights.end())
return I->second;
return DEFAULT_WEIGHT;
}
uint32_t BranchProbabilityInfo::getEdgeWeight(const BasicBlock *Src,
succ_const_iterator Dst) const {
return getEdgeWeight(Src, Dst.getSuccessorIndex());
}
/// Get the raw edge weight calculated for the block pair. This returns the sum
/// of all raw edge weights from Src to Dst.
uint32_t BranchProbabilityInfo::
getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const {
uint32_t Weight = 0;
bool FoundWeight = false;
DenseMap<Edge, uint32_t>::const_iterator MapI;
for (succ_const_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
if (*I == Dst) {
MapI = Weights.find(std::make_pair(Src, I.getSuccessorIndex()));
if (MapI != Weights.end()) {
FoundWeight = true;
Weight += MapI->second;
}
}
return (!FoundWeight) ? DEFAULT_WEIGHT : Weight;
}
/// Set the edge weight for a given edge specified by PredBlock and an index
/// into its successors.
void BranchProbabilityInfo::
setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors,
uint32_t Weight) {
Weights[std::make_pair(Src, IndexInSuccessors)] = Weight;
DEBUG(dbgs() << "set edge " << Src->getName() << " -> "
<< IndexInSuccessors << " successor weight to "
<< Weight << "\n");
}
/// Get an edge's probability, relative to other out-edges from Src.
BranchProbability BranchProbabilityInfo::
getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const {
uint32_t N = getEdgeWeight(Src, IndexInSuccessors);
uint32_t D = getSumForBlock(Src);
return BranchProbability(N, D);
}
/// Get the probability of going from Src to Dst. It returns the sum of all
/// probabilities for edges from Src to Dst.
BranchProbability BranchProbabilityInfo::
getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const {
uint32_t N = getEdgeWeight(Src, Dst);
uint32_t D = getSumForBlock(Src);
return BranchProbability(N, D);
}
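// A small worked example (weights are illustrative): if BB has two successors
// with raw weights 20 and 12, getSumForBlock(BB) is 32 and
//
//   getEdgeProbability(BB, 0u); // BranchProbability(20, 32), 62.5%
//   getEdgeProbability(BB, 1u); // BranchProbability(12, 32), 37.5%
//
// For the block-pair overload, duplicate edges to the same successor are
// summed by getEdgeWeight before the division.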
raw_ostream &
BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS,
const BasicBlock *Src,
const BasicBlock *Dst) const {
const BranchProbability Prob = getEdgeProbability(Src, Dst);
OS << "edge " << Src->getName() << " -> " << Dst->getName()
<< " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
return OS;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/Interval.cpp | //===- Interval.cpp - Interval class code ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the definition of the Interval class, which represents a
// partition of a control flow graph of some kind.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Interval.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
//===----------------------------------------------------------------------===//
// Interval Implementation
//===----------------------------------------------------------------------===//
// isLoop - Find out if there is a back edge in this interval...
//
bool Interval::isLoop() const {
// There is a loop in this interval iff one of the predecessors of the header
// node lives in the interval.
for (::pred_iterator I = ::pred_begin(HeaderNode), E = ::pred_end(HeaderNode);
I != E; ++I)
if (contains(*I))
return true;
return false;
}
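// Illustrative case: for an interval I whose nodes are {%header, %body} and
// whose header is %header, a CFG edge %body -> %header makes %body a
// predecessor of the header with contains(%body) true, so I.isLoop() holds.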
void Interval::print(raw_ostream &OS) const {
OS << "-------------------------------------------------------------\n"
<< "Interval Contents:\n";
// Print out all of the basic blocks in the interval...
for (std::vector<BasicBlock*>::const_iterator I = Nodes.begin(),
E = Nodes.end(); I != E; ++I)
OS << **I << "\n";
OS << "Interval Predecessors:\n";
for (std::vector<BasicBlock*>::const_iterator I = Predecessors.begin(),
E = Predecessors.end(); I != E; ++I)
OS << **I << "\n";
OS << "Interval Successors:\n";
for (std::vector<BasicBlock*>::const_iterator I = Successors.begin(),
E = Successors.end(); I != E; ++I)
OS << **I << "\n";
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/MemDepPrinter.cpp | //===- MemDepPrinter.cpp - Printer for MemoryDependenceAnalysis -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a printer pass that displays the results computed by
// MemoryDependenceAnalysis for each memory-accessing instruction.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
struct MemDepPrinter : public FunctionPass {
const Function *F;
enum DepType {
Clobber = 0,
Def,
NonFuncLocal,
Unknown
};
static const char *const DepTypeStr[];
typedef PointerIntPair<const Instruction *, 2, DepType> InstTypePair;
typedef std::pair<InstTypePair, const BasicBlock *> Dep;
typedef SmallSetVector<Dep, 4> DepSet;
typedef DenseMap<const Instruction *, DepSet> DepSetMap;
DepSetMap Deps;
    static char ID; // Pass identification, replacement for typeid
MemDepPrinter() : FunctionPass(ID) {
initializeMemDepPrinterPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
void print(raw_ostream &OS, const Module * = nullptr) const override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredTransitive<AliasAnalysis>();
AU.addRequiredTransitive<MemoryDependenceAnalysis>();
AU.setPreservesAll();
}
void releaseMemory() override {
Deps.clear();
F = nullptr;
}
private:
static InstTypePair getInstTypePair(MemDepResult dep) {
if (dep.isClobber())
return InstTypePair(dep.getInst(), Clobber);
if (dep.isDef())
return InstTypePair(dep.getInst(), Def);
if (dep.isNonFuncLocal())
return InstTypePair(dep.getInst(), NonFuncLocal);
assert(dep.isUnknown() && "unexpected dependence type");
return InstTypePair(dep.getInst(), Unknown);
}
static InstTypePair getInstTypePair(const Instruction* inst, DepType type) {
return InstTypePair(inst, type);
}
};
}
char MemDepPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(MemDepPrinter, "print-memdeps",
"Print MemDeps of function", false, true)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_END(MemDepPrinter, "print-memdeps",
"Print MemDeps of function", false, true)
FunctionPass *llvm::createMemDepPrinter() {
return new MemDepPrinter();
}
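// Typical usage (a minimal sketch): from the command line the pass is reached
// through the name registered above, e.g. "opt -analyze -print-memdeps in.ll";
// programmatically, assuming a loaded Module M:
//
//   legacy::PassManager PM;
//   PM.add(createMemDepPrinter());
//   PM.run(M);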
const char *const MemDepPrinter::DepTypeStr[]
= {"Clobber", "Def", "NonFuncLocal", "Unknown"};
bool MemDepPrinter::runOnFunction(Function &F) {
this->F = &F;
MemoryDependenceAnalysis &MDA = getAnalysis<MemoryDependenceAnalysis>();
// All this code uses non-const interfaces because MemDep is not
// const-friendly, though nothing is actually modified.
for (auto &I : inst_range(F)) {
Instruction *Inst = &I;
if (!Inst->mayReadFromMemory() && !Inst->mayWriteToMemory())
continue;
MemDepResult Res = MDA.getDependency(Inst);
if (!Res.isNonLocal()) {
Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
static_cast<BasicBlock *>(nullptr)));
} else if (auto CS = CallSite(Inst)) {
const MemoryDependenceAnalysis::NonLocalDepInfo &NLDI =
MDA.getNonLocalCallDependency(CS);
DepSet &InstDeps = Deps[Inst];
for (MemoryDependenceAnalysis::NonLocalDepInfo::const_iterator
I = NLDI.begin(), E = NLDI.end(); I != E; ++I) {
const MemDepResult &Res = I->getResult();
InstDeps.insert(std::make_pair(getInstTypePair(Res), I->getBB()));
}
} else {
SmallVector<NonLocalDepResult, 4> NLDI;
assert( (isa<LoadInst>(Inst) || isa<StoreInst>(Inst) ||
isa<VAArgInst>(Inst)) && "Unknown memory instruction!");
MDA.getNonLocalPointerDependency(Inst, NLDI);
DepSet &InstDeps = Deps[Inst];
for (SmallVectorImpl<NonLocalDepResult>::const_iterator
I = NLDI.begin(), E = NLDI.end(); I != E; ++I) {
const MemDepResult &Res = I->getResult();
InstDeps.insert(std::make_pair(getInstTypePair(Res), I->getBB()));
}
}
}
return false;
}
void MemDepPrinter::print(raw_ostream &OS, const Module *M) const {
for (const auto &I : inst_range(*F)) {
const Instruction *Inst = &I;
DepSetMap::const_iterator DI = Deps.find(Inst);
if (DI == Deps.end())
continue;
const DepSet &InstDeps = DI->second;
for (const auto &I : InstDeps) {
const Instruction *DepInst = I.first.getPointer();
DepType type = I.first.getInt();
const BasicBlock *DepBB = I.second;
OS << " ";
OS << DepTypeStr[type];
if (DepBB) {
OS << " in block ";
DepBB->printAsOperand(OS, /*PrintType=*/false, M);
}
if (DepInst) {
OS << " from: ";
DepInst->print(OS);
}
OS << "\n";
}
Inst->print(OS);
OS << "\n\n";
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/PostDominators.cpp | //===- PostDominators.cpp - Post-Dominator Calculation --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the post-dominator construction algorithms.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/PostDominators.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
using namespace llvm;
#define DEBUG_TYPE "postdomtree"
//===----------------------------------------------------------------------===//
// PostDominatorTree Implementation
//===----------------------------------------------------------------------===//
char PostDominatorTree::ID = 0;
INITIALIZE_PASS(PostDominatorTree, "postdomtree",
"Post-Dominator Tree Construction", true, true)
bool PostDominatorTree::runOnFunction(Function &F) {
DT->recalculate(F);
return false;
}
PostDominatorTree::~PostDominatorTree() {
delete DT;
}
void PostDominatorTree::print(raw_ostream &OS, const Module *) const {
DT->print(OS);
}
FunctionPass* llvm::createPostDomTree() {
return new PostDominatorTree();
}
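// Example query from a client pass (a sketch; assumes the client declared
// AU.addRequired<PostDominatorTree>() in its getAnalysisUsage):
//
//   PostDominatorTree &PDT = getAnalysis<PostDominatorTree>();
//   if (PDT.dominates(ExitBB, BB))
//     ; // every path from BB to the function's exit passes through ExitBB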
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/AliasAnalysis.cpp | //===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic AliasAnalysis interface which is used as the
// common interface used by all clients and implementations of alias analysis.
//
// This file also implements the default version of the AliasAnalysis interface
// that is to be used when no other implementation is specified. This does some
// simple tests that detect obvious cases: two different global pointers cannot
// alias, a global cannot alias a malloc, two different mallocs cannot alias,
// etc.
//
// This alias analysis implementation really isn't very good for anything, but
// it is very fast, and makes a nice clean default implementation. Because it
// handles lots of little corner cases, other, more complex, alias analysis
// implementations may choose to rely on this pass to resolve these simple and
// easy cases.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
using namespace llvm;
// Register the AliasAnalysis interface, providing a nice name to refer to.
INITIALIZE_ANALYSIS_GROUP(AliasAnalysis, "Alias Analysis", NoAA)
char AliasAnalysis::ID = 0;
//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//
AliasResult AliasAnalysis::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->alias(LocA, LocB);
}
bool AliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->pointsToConstantMemory(Loc, OrLocal);
}
AliasAnalysis::ModRefResult
AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->getArgModRefInfo(CS, ArgIdx);
}
void AliasAnalysis::deleteValue(Value *V) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
AA->deleteValue(V);
}
void AliasAnalysis::addEscapingUse(Use &U) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
AA->addEscapingUse(U);
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
  // I may itself be a call site; if so, we have two calls to compare.
if (auto CS = ImmutableCallSite(I)) {
// Check if the two calls modify the same memory
return getModRefInfo(Call, CS);
} else {
    // Otherwise, check if the call modifies or references the
    // location this memory access defines. The best we can say is that if
    // the call touches what this instruction defines, the two interact and
    // we must conservatively answer ModRef.
const MemoryLocation DefLoc = MemoryLocation::get(I);
if (getModRefInfo(Call, DefLoc) != AliasAnalysis::NoModRef)
return AliasAnalysis::ModRef;
}
return AliasAnalysis::NoModRef;
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
ModRefBehavior MRB = getModRefBehavior(CS);
if (MRB == DoesNotAccessMemory)
return NoModRef;
ModRefResult Mask = ModRef;
if (onlyReadsMemory(MRB))
Mask = Ref;
if (onlyAccessesArgPointees(MRB)) {
bool doesAlias = false;
ModRefResult AllArgsMask = NoModRef;
if (doesAccessArgPointees(MRB)) {
for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI) {
const Value *Arg = *AI;
if (!Arg->getType()->isPointerTy())
continue;
unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
MemoryLocation ArgLoc =
MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
if (!isNoAlias(ArgLoc, Loc)) {
ModRefResult ArgMask = getArgModRefInfo(CS, ArgIdx);
doesAlias = true;
AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
}
}
}
if (!doesAlias)
return NoModRef;
Mask = ModRefResult(Mask & AllArgsMask);
}
// If Loc is a constant memory location, the call definitely could not
// modify the memory location.
if ((Mask & Mod) && pointsToConstantMemory(Loc))
Mask = ModRefResult(Mask & ~Mod);
// If this is the end of the chain, don't forward.
if (!AA) return Mask;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any mask we've managed to compute.
return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
}
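// For example, if CS is known to be readonly, Mask is narrowed to Ref before
// chaining; even if the next analysis in the chain answers ModRef for
// (CS, Loc), the merged result ModRefResult(Ref & ModRef) is still Ref. Each
// link in the chain can only refine the answer, never widen it.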
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
// If CS1 or CS2 are readnone, they don't interact.
ModRefBehavior CS1B = getModRefBehavior(CS1);
if (CS1B == DoesNotAccessMemory) return NoModRef;
ModRefBehavior CS2B = getModRefBehavior(CS2);
if (CS2B == DoesNotAccessMemory) return NoModRef;
// If they both only read from memory, there is no dependence.
if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
return NoModRef;
AliasAnalysis::ModRefResult Mask = ModRef;
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
if (onlyReadsMemory(CS1B))
Mask = ModRefResult(Mask & Ref);
// If CS2 only access memory through arguments, accumulate the mod/ref
// information from CS1's references to the memory referenced by
// CS2's arguments.
if (onlyAccessesArgPointees(CS2B)) {
AliasAnalysis::ModRefResult R = NoModRef;
if (doesAccessArgPointees(CS2B)) {
for (ImmutableCallSite::arg_iterator
I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);
// ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
// CS1 on that location is the inverse.
ModRefResult ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
if (ArgMask == Mod)
ArgMask = ModRef;
else if (ArgMask == Ref)
ArgMask = Mod;
R = ModRefResult((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
if (R == Mask)
break;
}
}
return R;
}
// If CS1 only accesses memory through arguments, check if CS2 references
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
if (onlyAccessesArgPointees(CS1B)) {
AliasAnalysis::ModRefResult R = NoModRef;
if (doesAccessArgPointees(CS1B)) {
for (ImmutableCallSite::arg_iterator
I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);
// ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
// CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
// might Ref, then we care only about a Mod by CS2.
ModRefResult ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
ModRefResult ArgR = getModRefInfo(CS2, CS1ArgLoc);
if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
R = ModRefResult((R | ArgMask) & Mask);
if (R == Mask)
break;
}
}
return R;
}
// If this is the end of the chain, don't forward.
if (!AA) return Mask;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any mask we've managed to compute.
return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
}
AliasAnalysis::ModRefBehavior
AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
ModRefBehavior Min = UnknownModRefBehavior;
// Call back into the alias analysis with the other form of getModRefBehavior
// to see if it can give a better response.
if (const Function *F = CS.getCalledFunction())
Min = getModRefBehavior(F);
// If this is the end of the chain, don't forward.
if (!AA) return Min;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any result we've managed to compute.
return ModRefBehavior(AA->getModRefBehavior(CS) & Min);
}
AliasAnalysis::ModRefBehavior
AliasAnalysis::getModRefBehavior(const Function *F) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->getModRefBehavior(F);
}
//===----------------------------------------------------------------------===//
// AliasAnalysis non-virtual helper method implementation
//===----------------------------------------------------------------------===//
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const LoadInst *L, const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!L->isUnordered())
return ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
return NoModRef;
// Otherwise, a load just reads.
return Ref;
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const StoreInst *S, const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!S->isUnordered())
return ModRef;
if (Loc.Ptr) {
// If the store address cannot alias the pointer in question, then the
// specified memory cannot be modified by the store.
if (!alias(MemoryLocation::get(S), Loc))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this store.
if (pointsToConstantMemory(Loc))
return NoModRef;
}
// Otherwise, a store just writes.
return Mod;
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc) {
if (Loc.Ptr) {
// If the va_arg address cannot alias the pointer in question, then the
// specified memory cannot be accessed by the va_arg.
if (!alias(MemoryLocation::get(V), Loc))
return NoModRef;
// If the pointer is a pointer to constant memory, then it could not have
// been modified by this va_arg.
if (pointsToConstantMemory(Loc))
return NoModRef;
}
// Otherwise, a va_arg reads and writes.
return ModRef;
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (CX->getSuccessOrdering() > Monotonic)
return ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
return NoModRef;
return ModRef;
}
AliasAnalysis::ModRefResult
AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (RMW->getOrdering() > Monotonic)
return ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
return NoModRef;
return ModRef;
}
// FIXME: this is really just shoring-up a deficiency in alias analysis.
// BasicAA isn't willing to spend linear time determining whether an alloca
// was captured before or after this particular call, while we are. However,
// with a smarter AA in place, this test is just wasting compile time.
AliasAnalysis::ModRefResult AliasAnalysis::callCapturesBefore(
const Instruction *I, const MemoryLocation &MemLoc, DominatorTree *DT) {
if (!DT)
return AliasAnalysis::ModRef;
const Value *Object = GetUnderlyingObject(MemLoc.Ptr, *DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return AliasAnalysis::ModRef;
ImmutableCallSite CS(I);
if (!CS.getInstruction() || CS.getInstruction() == Object)
return AliasAnalysis::ModRef;
if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
/* StoreCaptures */ true, I, DT,
/* include Object */ true))
return AliasAnalysis::ModRef;
unsigned ArgNo = 0;
AliasAnalysis::ModRefResult R = AliasAnalysis::NoModRef;
for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture or byval pointer arguments. If this
// pointer were passed to arguments that were neither of these, then it
// couldn't be no-capture.
if (!(*CI)->getType()->isPointerTy() ||
(!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
continue;
// If this is a no-capture pointer argument, see if we can tell that it
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
continue;
if (CS.doesNotAccessMemory(ArgNo))
continue;
if (CS.onlyReadsMemory(ArgNo)) {
R = AliasAnalysis::Ref;
continue;
}
return AliasAnalysis::ModRef;
}
return R;
}
// AliasAnalysis destructor: DO NOT move this to the header file for
// AliasAnalysis or else clients of the AliasAnalysis class may not depend on
// the AliasAnalysis.o file in the current .a file, causing alias analysis
// support to not be included in the tool correctly!
//
AliasAnalysis::~AliasAnalysis() {}
/// InitializeAliasAnalysis - Subclasses must call this method to initialize the
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P, const DataLayout *NewDL) {
DL = NewDL;
auto *TLIP = P->getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &P->getAnalysis<AliasAnalysis>();
}
// getAnalysisUsage - All alias analysis implementations should invoke this
// directly (using AliasAnalysis::getAnalysisUsage(AU)).
void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>(); // All AA's chain
}
/// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
return DL ? DL->getTypeStoreSize(Ty) : MemoryLocation::UnknownSize;
}
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
const MemoryLocation &Loc) {
return canInstructionRangeModRef(BB.front(), BB.back(), Loc, Mod);
}
/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AliasAnalysis::canInstructionRangeModRef(const Instruction &I1,
const Instruction &I2,
const MemoryLocation &Loc,
const ModRefResult Mode) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
BasicBlock::const_iterator I = &I1;
BasicBlock::const_iterator E = &I2;
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
if (getModRefInfo(I, Loc) & Mode)
return true;
return false;
}
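// Example query (a sketch; First and Last are instructions in one basic block
// and TheLoad is a LoadInst elsewhere in the function):
//
//   MemoryLocation Loc = MemoryLocation::get(TheLoad);
//   if (AA.canInstructionRangeModRef(First, Last, Loc, AliasAnalysis::Mod))
//     ; // some instruction in [First, Last] may write the memory Loc names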
/// isNoAliasCall - Return true if this pointer is returned by a noalias
/// function.
bool llvm::isNoAliasCall(const Value *V) {
if (isa<CallInst>(V) || isa<InvokeInst>(V))
return ImmutableCallSite(cast<Instruction>(V))
.paramHasAttr(0, Attribute::NoAlias);
return false;
}
/// isNoAliasArgument - Return true if this is an argument with the noalias
/// attribute.
bool llvm::isNoAliasArgument(const Value *V) {
if (const Argument *A = dyn_cast<Argument>(V))
return A->hasNoAliasAttr();
return false;
}
/// isIdentifiedObject - Return true if this pointer refers to a distinct and
/// identifiable object. This returns true for:
/// Global Variables and Functions (but not Global Aliases)
/// Allocas and Mallocs
/// ByVal and NoAlias Arguments
/// NoAlias returns
///
bool llvm::isIdentifiedObject(const Value *V) {
if (isa<AllocaInst>(V))
return true;
if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
return true;
if (isNoAliasCall(V))
return true;
if (const Argument *A = dyn_cast<Argument>(V))
return A->hasNoAliasAttr() || A->hasByValAttr();
return false;
}
/// isIdentifiedFunctionLocal - Return true if V is unambiguously identified
/// at the function-level. Different IdentifiedFunctionLocals can't alias.
/// Further, an IdentifiedFunctionLocal can not alias with any function
/// arguments other than itself, which is not necessarily true for
/// IdentifiedObjects.
bool llvm::isIdentifiedFunctionLocal(const Value *V) {
return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
}
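// A few illustrative queries (AI, GV, GA and Arg assumed defined elsewhere):
//
//   isIdentifiedObject(AI);  // true: an alloca is a distinct object
//   isIdentifiedObject(GV);  // true for a GlobalVariable...
//   isIdentifiedObject(GA);  // ...but false for a GlobalAlias
//   isIdentifiedObject(Arg); // true only if Arg is noalias or byval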
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/LazyValueInfo.cpp | //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for lazy computation of value constraint
// information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <stack>
using namespace llvm;
using namespace PatternMatch;
#define DEBUG_TYPE "lazy-value-info"
char LazyValueInfo::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfo, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfo, "lazy-value-info",
"Lazy Value Information Analysis", false, true)
namespace llvm {
FunctionPass *createLazyValueInfoPass() { return new LazyValueInfo(); }
}
//===----------------------------------------------------------------------===//
// LVILatticeVal
//===----------------------------------------------------------------------===//
/// This is the information tracked by LazyValueInfo for each value.
///
/// FIXME: This is basically just for bringup, this can be made a lot more rich
/// in the future.
///
namespace {
class LVILatticeVal {
enum LatticeValueTy {
/// This Value has no known value yet.
undefined,
/// This Value has a specific constant value.
constant,
/// This Value is known to not have the specified value.
notconstant,
/// The Value falls within this range.
constantrange,
/// This value is not known to be constant, and we know that it has a value.
overdefined
};
/// Val: This stores the current lattice value along with the Constant* for
/// the constant if this is a 'constant' or 'notconstant' value.
LatticeValueTy Tag;
Constant *Val;
ConstantRange Range;
public:
LVILatticeVal() : Tag(undefined), Val(nullptr), Range(1, true) {}
static LVILatticeVal get(Constant *C) {
LVILatticeVal Res;
if (!isa<UndefValue>(C))
Res.markConstant(C);
return Res;
}
static LVILatticeVal getNot(Constant *C) {
LVILatticeVal Res;
if (!isa<UndefValue>(C))
Res.markNotConstant(C);
return Res;
}
static LVILatticeVal getRange(ConstantRange CR) {
LVILatticeVal Res;
Res.markConstantRange(CR);
return Res;
}
bool isUndefined() const { return Tag == undefined; }
bool isConstant() const { return Tag == constant; }
bool isNotConstant() const { return Tag == notconstant; }
bool isConstantRange() const { return Tag == constantrange; }
bool isOverdefined() const { return Tag == overdefined; }
Constant *getConstant() const {
assert(isConstant() && "Cannot get the constant of a non-constant!");
return Val;
}
Constant *getNotConstant() const {
assert(isNotConstant() && "Cannot get the constant of a non-notconstant!");
return Val;
}
ConstantRange getConstantRange() const {
assert(isConstantRange() &&
"Cannot get the constant-range of a non-constant-range!");
return Range;
}
/// Return true if this is a change in status.
bool markOverdefined() {
if (isOverdefined())
return false;
Tag = overdefined;
return true;
}
/// Return true if this is a change in status.
bool markConstant(Constant *V) {
assert(V && "Marking constant with NULL");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return markConstantRange(ConstantRange(CI->getValue()));
if (isa<UndefValue>(V))
return false;
assert((!isConstant() || getConstant() == V) &&
"Marking constant with different value");
assert(isUndefined());
Tag = constant;
Val = V;
return true;
}
/// Return true if this is a change in status.
bool markNotConstant(Constant *V) {
assert(V && "Marking constant with NULL");
if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return markConstantRange(ConstantRange(CI->getValue()+1, CI->getValue()));
if (isa<UndefValue>(V))
return false;
assert((!isConstant() || getConstant() != V) &&
"Marking constant !constant with same value");
assert((!isNotConstant() || getNotConstant() == V) &&
"Marking !constant with different value");
assert(isUndefined() || isConstant());
Tag = notconstant;
Val = V;
return true;
}
/// Return true if this is a change in status.
bool markConstantRange(const ConstantRange NewR) {
if (isConstantRange()) {
if (NewR.isEmptySet())
return markOverdefined();
bool changed = Range != NewR;
Range = NewR;
return changed;
}
assert(isUndefined());
if (NewR.isEmptySet())
return markOverdefined();
Tag = constantrange;
Range = NewR;
return true;
}
/// Merge the specified lattice value into this one, updating this
/// one and returning true if anything changed.
bool mergeIn(const LVILatticeVal &RHS, const DataLayout &DL) {
if (RHS.isUndefined() || isOverdefined()) return false;
if (RHS.isOverdefined()) return markOverdefined();
if (isUndefined()) {
Tag = RHS.Tag;
Val = RHS.Val;
Range = RHS.Range;
return true;
}
if (isConstant()) {
if (RHS.isConstant()) {
if (Val == RHS.Val)
return false;
return markOverdefined();
}
if (RHS.isNotConstant()) {
if (Val == RHS.Val)
return markOverdefined();
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
if (ConstantInt *Res =
dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
CmpInst::ICMP_NE, getConstant(), RHS.getNotConstant(), DL)))
if (Res->isOne())
return markNotConstant(RHS.getNotConstant());
return markOverdefined();
}
// RHS is a ConstantRange, LHS is a non-integer Constant.
// FIXME: consider the case where RHS is a range [1, 0) and LHS is
// a function. The correct result is to pick up RHS.
return markOverdefined();
}
if (isNotConstant()) {
if (RHS.isConstant()) {
if (Val == RHS.Val)
return markOverdefined();
// Unless we can prove that the two Constants are different, we must
// move to overdefined.
if (ConstantInt *Res =
dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
CmpInst::ICMP_NE, getNotConstant(), RHS.getConstant(), DL)))
if (Res->isOne())
return false;
return markOverdefined();
}
if (RHS.isNotConstant()) {
if (Val == RHS.Val)
return false;
return markOverdefined();
}
return markOverdefined();
}
assert(isConstantRange() && "New LVILattice type?");
if (!RHS.isConstantRange())
return markOverdefined();
ConstantRange NewR = Range.unionWith(RHS.getConstantRange());
if (NewR.isFullSet())
return markOverdefined();
return markConstantRange(NewR);
}
};
} // end anonymous namespace.
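// A couple of illustrative lattice transitions (I32, DL assumed in scope):
//
//   LVILatticeVal A = LVILatticeVal::get(ConstantInt::get(I32, 5));
//   // Integer constants are routed through markConstantRange above, so A is
//   // the single-element constantrange<5, 6>.
//   LVILatticeVal B = LVILatticeVal::getRange(
//       ConstantRange(APInt(32, 0), APInt(32, 10)));
//   A.mergeIn(B, DL); // A widens to the union, constantrange<0, 10>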
namespace llvm {
raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val)
LLVM_ATTRIBUTE_USED;
raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) {
if (Val.isUndefined())
return OS << "undefined";
if (Val.isOverdefined())
return OS << "overdefined";
if (Val.isNotConstant())
return OS << "notconstant<" << *Val.getNotConstant() << '>';
else if (Val.isConstantRange())
return OS << "constantrange<" << Val.getConstantRange().getLower() << ", "
<< Val.getConstantRange().getUpper() << '>';
return OS << "constant<" << *Val.getConstant() << '>';
}
}
//===----------------------------------------------------------------------===//
// LazyValueInfoCache Decl
//===----------------------------------------------------------------------===//
namespace {
/// A callback value handle updates the cache when values are erased.
class LazyValueInfoCache;
struct LVIValueHandle : public CallbackVH {
LazyValueInfoCache *Parent;
LVIValueHandle(Value *V, LazyValueInfoCache *P)
: CallbackVH(V), Parent(P) { }
void deleted() override;
void allUsesReplacedWith(Value *V) override {
deleted();
}
};
}
namespace {
  /// This is the cache kept by LazyValueInfo; it maintains computed lattice
  /// values across the clients' queries.
class LazyValueInfoCache {
/// This is all of the cached block information for exactly one Value*.
/// The entries are sorted by the BasicBlock* of the
/// entries, allowing us to do a lookup with a binary search.
typedef std::map<AssertingVH<BasicBlock>, LVILatticeVal> ValueCacheEntryTy;
/// This is all of the cached information for all values,
/// mapped from Value* to key information.
std::map<LVIValueHandle, ValueCacheEntryTy> ValueCache;
/// This tracks, on a per-block basis, the set of values that are
/// over-defined at the end of that block. This is required
/// for cache updating.
typedef std::pair<AssertingVH<BasicBlock>, Value*> OverDefinedPairTy;
DenseSet<OverDefinedPairTy> OverDefinedCache;
/// Keep track of all blocks that we have ever seen, so we
/// don't spend time removing unused blocks from our caches.
DenseSet<AssertingVH<BasicBlock> > SeenBlocks;
/// This stack holds the state of the value solver during a query.
/// It basically emulates the callstack of the naive
/// recursive value lookup process.
std::stack<std::pair<BasicBlock*, Value*> > BlockValueStack;
/// Keeps track of which block-value pairs are in BlockValueStack.
DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
/// Push BV onto BlockValueStack unless it's already in there.
/// Returns true on success.
bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
if (!BlockValueSet.insert(BV).second)
return false; // It's already in the stack.
BlockValueStack.push(BV);
return true;
}
AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
const DataLayout &DL; ///< A mandatory DataLayout
DominatorTree *DT; ///< An optional DT pointer.
friend struct LVIValueHandle;
void insertResult(Value *Val, BasicBlock *BB, const LVILatticeVal &Result) {
SeenBlocks.insert(BB);
lookup(Val)[BB] = Result;
if (Result.isOverdefined())
OverDefinedCache.insert(std::make_pair(BB, Val));
}
LVILatticeVal getBlockValue(Value *Val, BasicBlock *BB);
bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T,
LVILatticeVal &Result,
Instruction *CxtI = nullptr);
bool hasBlockValue(Value *Val, BasicBlock *BB);
// These methods process one work item and may add more. A false value
// returned means that the work item was not completely processed and must
// be revisited after going through the new items.
bool solveBlockValue(Value *Val, BasicBlock *BB);
bool solveBlockValueNonLocal(LVILatticeVal &BBLV,
Value *Val, BasicBlock *BB);
bool solveBlockValuePHINode(LVILatticeVal &BBLV,
PHINode *PN, BasicBlock *BB);
bool solveBlockValueConstantRange(LVILatticeVal &BBLV,
Instruction *BBI, BasicBlock *BB);
void mergeAssumeBlockValueConstantRange(Value *Val, LVILatticeVal &BBLV,
Instruction *BBI);
void solve();
ValueCacheEntryTy &lookup(Value *V) {
return ValueCache[LVIValueHandle(V, this)];
}
public:
/// This is the query interface to determine the lattice
/// value for the specified Value* at the end of the specified block.
LVILatticeVal getValueInBlock(Value *V, BasicBlock *BB,
Instruction *CxtI = nullptr);
/// This is the query interface to determine the lattice
/// value for the specified Value* at the specified instruction (generally
/// from an assume intrinsic).
LVILatticeVal getValueAt(Value *V, Instruction *CxtI);
/// This is the query interface to determine the lattice
/// value for the specified Value* that is true on the specified edge.
LVILatticeVal getValueOnEdge(Value *V, BasicBlock *FromBB,BasicBlock *ToBB,
Instruction *CxtI = nullptr);
/// This is the update interface to inform the cache that an edge from
/// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
/// This is part of the update interface to inform the cache
/// that a block has been deleted.
void eraseBlock(BasicBlock *BB);
/// clear - Empty the cache.
void clear() {
SeenBlocks.clear();
ValueCache.clear();
OverDefinedCache.clear();
}
LazyValueInfoCache(AssumptionCache *AC, const DataLayout &DL,
DominatorTree *DT = nullptr)
: AC(AC), DL(DL), DT(DT) {}
};
} // end anonymous namespace
void LVIValueHandle::deleted() {
typedef std::pair<AssertingVH<BasicBlock>, Value*> OverDefinedPairTy;
SmallVector<OverDefinedPairTy, 4> ToErase;
for (const OverDefinedPairTy &P : Parent->OverDefinedCache)
if (P.second == getValPtr())
ToErase.push_back(P);
for (const OverDefinedPairTy &P : ToErase)
Parent->OverDefinedCache.erase(P);
// This erasure deallocates *this, so it MUST happen after we're done
// using any and all members of *this.
Parent->ValueCache.erase(*this);
}
void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
// Shortcut if we have never seen this block.
DenseSet<AssertingVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
if (I == SeenBlocks.end())
return;
SeenBlocks.erase(I);
SmallVector<OverDefinedPairTy, 4> ToErase;
for (const OverDefinedPairTy& P : OverDefinedCache)
if (P.first == BB)
ToErase.push_back(P);
for (const OverDefinedPairTy &P : ToErase)
OverDefinedCache.erase(P);
for (std::map<LVIValueHandle, ValueCacheEntryTy>::iterator
I = ValueCache.begin(), E = ValueCache.end(); I != E; ++I)
I->second.erase(BB);
}
void LazyValueInfoCache::solve() {
while (!BlockValueStack.empty()) {
std::pair<BasicBlock*, Value*> e = BlockValueStack.top();
assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
if (solveBlockValue(e.second, e.first)) {
// The work item was completely processed.
assert(BlockValueStack.top() == e && "Nothing should have been pushed!");
assert(lookup(e.second).count(e.first) && "Result should be in cache!");
BlockValueStack.pop();
BlockValueSet.erase(e);
} else {
// More work needs to be done before revisiting.
assert(BlockValueStack.top() != e && "Stack should have been pushed!");
}
}
}
bool LazyValueInfoCache::hasBlockValue(Value *Val, BasicBlock *BB) {
// If already a constant, there is nothing to compute.
if (isa<Constant>(Val))
return true;
LVIValueHandle ValHandle(Val, this);
std::map<LVIValueHandle, ValueCacheEntryTy>::iterator I =
ValueCache.find(ValHandle);
if (I == ValueCache.end()) return false;
return I->second.count(BB);
}
LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) {
// If already a constant, there is nothing to compute.
if (Constant *VC = dyn_cast<Constant>(Val))
return LVILatticeVal::get(VC);
SeenBlocks.insert(BB);
return lookup(Val)[BB];
}
bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
if (isa<Constant>(Val))
return true;
if (lookup(Val).count(BB)) {
// If we have a cached value, use that.
DEBUG(dbgs() << " reuse BB '" << BB->getName()
<< "' val=" << lookup(Val)[BB] << '\n');
// Since we're reusing a cached value, we don't need to update the
// OverDefinedCache. The cache will have been properly updated whenever the
// cached value was inserted.
return true;
}
// Hold off inserting this value into the Cache in case we have to return
// false and come back later.
LVILatticeVal Res;
Instruction *BBI = dyn_cast<Instruction>(Val);
if (!BBI || BBI->getParent() != BB) {
if (!solveBlockValueNonLocal(Res, Val, BB))
return false;
insertResult(Val, BB, Res);
return true;
}
if (PHINode *PN = dyn_cast<PHINode>(BBI)) {
if (!solveBlockValuePHINode(Res, PN, BB))
return false;
insertResult(Val, BB, Res);
return true;
}
if (AllocaInst *AI = dyn_cast<AllocaInst>(BBI)) {
Res = LVILatticeVal::getNot(ConstantPointerNull::get(AI->getType()));
insertResult(Val, BB, Res);
return true;
}
// We can only analyze the definitions of certain classes of instructions
// (integral binops and casts at the moment), so bail if this isn't one.
LVILatticeVal Result;
if ((!isa<BinaryOperator>(BBI) && !isa<CastInst>(BBI)) ||
!BBI->getType()->isIntegerTy()) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because inst def found.\n");
Res.markOverdefined();
insertResult(Val, BB, Res);
return true;
}
// FIXME: We're currently limited to binops with a constant RHS. This should
// be improved.
BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
if (BO && !isa<ConstantInt>(BO->getOperand(1))) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because inst def found.\n");
Res.markOverdefined();
insertResult(Val, BB, Res);
return true;
}
if (!solveBlockValueConstantRange(Res, BBI, BB))
return false;
insertResult(Val, BB, Res);
return true;
}
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
GetUnderlyingObject(L->getPointerOperand(),
L->getModule()->getDataLayout()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
GetUnderlyingObject(S->getPointerOperand(),
S->getModule()->getDataLayout()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
// FIXME: check whether it has a valuerange that excludes zero?
ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
if (!Len || Len->isZero()) return false;
if (MI->getDestAddressSpace() == 0)
if (GetUnderlyingObject(MI->getRawDest(),
MI->getModule()->getDataLayout()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
if (GetUnderlyingObject(MTI->getRawSource(),
MTI->getModule()->getDataLayout()) == Ptr)
return true;
}
return false;
}
bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
Value *Val, BasicBlock *BB) {
LVILatticeVal Result; // Start Undefined.
  // If this is a pointer and some instruction in this BB provably
  // dereferences it (a load, store, or memory intrinsic through the same
  // underlying object), then we know that the pointer can't be NULL.
bool NotNull = false;
if (Val->getType()->isPointerTy()) {
if (isKnownNonNull(Val)) {
NotNull = true;
} else {
const DataLayout &DL = BB->getModule()->getDataLayout();
Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
// If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1)) {
for (Instruction &I : *BB) {
if (InstructionDereferencesPointer(&I, UnderlyingVal)) {
NotNull = true;
break;
}
}
}
}
}
// If this is the entry block, we must be asking about an argument. The
// value is overdefined.
if (BB == &BB->getParent()->getEntryBlock()) {
assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
if (NotNull) {
PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
} else {
Result.markOverdefined();
}
BBLV = Result;
return true;
}
// Loop over all of our predecessors, merging what we know from them into
// result.
bool EdgesMissing = false;
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
LVILatticeVal EdgeResult;
EdgesMissing |= !getEdgeValue(Val, *PI, BB, EdgeResult);
if (EdgesMissing)
continue;
Result.mergeIn(EdgeResult, DL);
// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
if (Result.isOverdefined()) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because of pred.\n");
// If we previously determined that this is a pointer that can't be null
// then return that rather than giving up entirely.
if (NotNull) {
PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
}
BBLV = Result;
return true;
}
}
if (EdgesMissing)
return false;
// Return the merged value, which is more precise than 'overdefined'.
assert(!Result.isOverdefined());
BBLV = Result;
return true;
}
bool LazyValueInfoCache::solveBlockValuePHINode(LVILatticeVal &BBLV,
PHINode *PN, BasicBlock *BB) {
LVILatticeVal Result; // Start Undefined.
// Loop over all of our predecessors, merging what we know from them into
// result.
bool EdgesMissing = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *PhiBB = PN->getIncomingBlock(i);
Value *PhiVal = PN->getIncomingValue(i);
LVILatticeVal EdgeResult;
// Note that we can provide PN as the context value to getEdgeValue, even
// though the results will be cached, because PN is the value being used as
// the cache key in the caller.
EdgesMissing |= !getEdgeValue(PhiVal, PhiBB, BB, EdgeResult, PN);
if (EdgesMissing)
continue;
Result.mergeIn(EdgeResult, DL);
// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
if (Result.isOverdefined()) {
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because of pred.\n");
BBLV = Result;
return true;
}
}
if (EdgesMissing)
return false;
// Return the merged value, which is more precise than 'overdefined'.
assert(!Result.isOverdefined() && "Possible PHI in entry block?");
BBLV = Result;
return true;
}
static bool getValueFromFromCondition(Value *Val, ICmpInst *ICI,
LVILatticeVal &Result,
bool isTrueDest = true);
// If we can determine a constant range for the value Val in the context
// provided by the instruction BBI (in particular, from any applicable
// @llvm.assume conditions), merge it into BBLV.
void LazyValueInfoCache::mergeAssumeBlockValueConstantRange(Value *Val,
LVILatticeVal &BBLV,
Instruction *BBI) {
BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
if (!BBI)
return;
for (auto &AssumeVH : AC->assumptions()) {
if (!AssumeVH)
continue;
auto *I = cast<CallInst>(AssumeVH);
if (!isValidAssumeForContext(I, BBI, DT))
continue;
Value *C = I->getArgOperand(0);
if (ICmpInst *ICI = dyn_cast<ICmpInst>(C)) {
LVILatticeVal Result;
if (getValueFromFromCondition(Val, ICI, Result)) {
if (BBLV.isOverdefined())
BBLV = Result;
else
BBLV.mergeIn(Result, DL);
}
}
}
}
bool LazyValueInfoCache::solveBlockValueConstantRange(LVILatticeVal &BBLV,
Instruction *BBI,
BasicBlock *BB) {
// Figure out the range of the LHS. If that fails, bail.
if (!hasBlockValue(BBI->getOperand(0), BB)) {
if (pushBlockValue(std::make_pair(BB, BBI->getOperand(0))))
return false;
BBLV.markOverdefined();
return true;
}
LVILatticeVal LHSVal = getBlockValue(BBI->getOperand(0), BB);
mergeAssumeBlockValueConstantRange(BBI->getOperand(0), LHSVal, BBI);
if (!LHSVal.isConstantRange()) {
BBLV.markOverdefined();
return true;
}
ConstantRange LHSRange = LHSVal.getConstantRange();
ConstantRange RHSRange(1);
IntegerType *ResultTy = cast<IntegerType>(BBI->getType());
if (isa<BinaryOperator>(BBI)) {
if (ConstantInt *RHS = dyn_cast<ConstantInt>(BBI->getOperand(1))) {
RHSRange = ConstantRange(RHS->getValue());
} else {
BBLV.markOverdefined();
return true;
}
}
// NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically. Enhancing that set will allow us to analyze
// more definitions.
LVILatticeVal Result;
switch (BBI->getOpcode()) {
case Instruction::Add:
Result.markConstantRange(LHSRange.add(RHSRange));
break;
case Instruction::Sub:
Result.markConstantRange(LHSRange.sub(RHSRange));
break;
case Instruction::Mul:
Result.markConstantRange(LHSRange.multiply(RHSRange));
break;
case Instruction::UDiv:
Result.markConstantRange(LHSRange.udiv(RHSRange));
break;
case Instruction::Shl:
Result.markConstantRange(LHSRange.shl(RHSRange));
break;
case Instruction::LShr:
Result.markConstantRange(LHSRange.lshr(RHSRange));
break;
case Instruction::Trunc:
Result.markConstantRange(LHSRange.truncate(ResultTy->getBitWidth()));
break;
case Instruction::SExt:
Result.markConstantRange(LHSRange.signExtend(ResultTy->getBitWidth()));
break;
case Instruction::ZExt:
Result.markConstantRange(LHSRange.zeroExtend(ResultTy->getBitWidth()));
break;
case Instruction::BitCast:
Result.markConstantRange(LHSRange);
break;
case Instruction::And:
Result.markConstantRange(LHSRange.binaryAnd(RHSRange));
break;
case Instruction::Or:
Result.markConstantRange(LHSRange.binaryOr(RHSRange));
break;
// Unhandled instructions are overdefined.
default:
DEBUG(dbgs() << " compute BB '" << BB->getName()
<< "' - overdefined because inst def found.\n");
Result.markOverdefined();
break;
}
BBLV = Result;
return true;
}
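// For example, if the LHS operand has block value constantrange<0, 200> and
// BBI is "trunc i32 %y to i8", truncate(8) yields the 8-bit range
// constantrange<0, 200>; an Add with constant RHS 100 would instead produce
// constantrange<100, 300> in 32 bits.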
bool getValueFromFromCondition(Value *Val, ICmpInst *ICI,
LVILatticeVal &Result, bool isTrueDest) {
if (ICI && isa<Constant>(ICI->getOperand(1))) {
if (ICI->isEquality() && ICI->getOperand(0) == Val) {
// We know that V has the RHS constant if this is a true SETEQ or
// false SETNE.
if (isTrueDest == (ICI->getPredicate() == ICmpInst::ICMP_EQ))
Result = LVILatticeVal::get(cast<Constant>(ICI->getOperand(1)));
else
Result = LVILatticeVal::getNot(cast<Constant>(ICI->getOperand(1)));
return true;
}
// Recognize the range checking idiom that InstCombine produces.
// (X-C1) u< C2 --> [C1, C1+C2)
ConstantInt *NegOffset = nullptr;
if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
match(ICI->getOperand(0), m_Add(m_Specific(Val),
m_ConstantInt(NegOffset)));
ConstantInt *CI = dyn_cast<ConstantInt>(ICI->getOperand(1));
if (CI && (ICI->getOperand(0) == Val || NegOffset)) {
// Calculate the range of values that are allowed by the comparison
ConstantRange CmpRange(CI->getValue());
ConstantRange TrueValues =
ConstantRange::makeAllowedICmpRegion(ICI->getPredicate(), CmpRange);
if (NegOffset) // Apply the offset from above.
TrueValues = TrueValues.subtract(NegOffset->getValue());
// If we're interested in the false dest, invert the condition.
if (!isTrueDest) TrueValues = TrueValues.inverse();
Result = LVILatticeVal::getRange(TrueValues);
return true;
}
}
return false;
}
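// Worked example for the range idiom above: with Val = %x and a condition of
// the form (%x - 5) u< 10, the matcher binds NegOffset to -5, the allowed
// region for the subtraction is [0, 10), and TrueValues.subtract(-5) shifts
// it back to [5, 15) for %x. On a false destination the inverse is used.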
/// \brief Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
/// Val is not constrained on the edge.
static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
BasicBlock *BBTo, LVILatticeVal &Result) {
// TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
// know that v != 0.
if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
// If this is a conditional branch and only one successor goes to BBTo, then
// we may be able to infer something from the condition.
if (BI->isConditional() &&
BI->getSuccessor(0) != BI->getSuccessor(1)) {
bool isTrueDest = BI->getSuccessor(0) == BBTo;
assert(BI->getSuccessor(!isTrueDest) == BBTo &&
"BBTo isn't a successor of BBFrom");
// If V is the condition of the branch itself, then we know exactly what
// it is.
if (BI->getCondition() == Val) {
Result = LVILatticeVal::get(ConstantInt::get(
Type::getInt1Ty(Val->getContext()), isTrueDest));
return true;
}
// If the condition of the branch is an equality comparison, we may be
// able to infer the value.
if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
if (getValueFromFromCondition(Val, ICI, Result, isTrueDest))
return true;
}
}
// If the edge was formed by a switch on the value, then we may know exactly
// what it is.
if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
if (SI->getCondition() != Val)
return false;
bool DefaultCase = SI->getDefaultDest() == BBTo;
unsigned BitWidth = Val->getType()->getIntegerBitWidth();
ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
for (SwitchInst::CaseIt i : SI->cases()) {
ConstantRange EdgeVal(i.getCaseValue()->getValue());
if (DefaultCase) {
// It is possible that the default destination is the destination of
// some cases. There is no need to perform difference for those cases.
if (i.getCaseSuccessor() != BBTo)
EdgesVals = EdgesVals.difference(EdgeVal);
} else if (i.getCaseSuccessor() == BBTo)
EdgesVals = EdgesVals.unionWith(EdgeVal);
}
Result = LVILatticeVal::getRange(EdgesVals);
return true;
}
return false;
}
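// For example, for a switch on i8 %v whose cases 1 and 2 both branch to %a,
// the edge to %a carries constantrange<1, 3> (the union of the two case
// values), while the edge to the default destination carries the wrapped
// complement with 1 and 2 removed.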
/// \brief Compute the value of Val on the edge BBFrom -> BBTo or the value at
/// the basic block if the edge does not constrain Val.
bool LazyValueInfoCache::getEdgeValue(Value *Val, BasicBlock *BBFrom,
BasicBlock *BBTo, LVILatticeVal &Result,
Instruction *CxtI) {
// If already a constant, there is nothing to compute.
if (Constant *VC = dyn_cast<Constant>(Val)) {
Result = LVILatticeVal::get(VC);
return true;
}
if (getEdgeValueLocal(Val, BBFrom, BBTo, Result)) {
if (!Result.isConstantRange() ||
Result.getConstantRange().getSingleElement())
return true;
// FIXME: this check should be moved to the beginning of the function when
// LVI better supports recursive values. Even for the single value case, we
// can intersect to detect dead code (an empty range).
if (!hasBlockValue(Val, BBFrom)) {
if (pushBlockValue(std::make_pair(BBFrom, Val)))
return false;
Result.markOverdefined();
return true;
}
// Try to intersect ranges of the BB and the constraint on the edge.
LVILatticeVal InBlock = getBlockValue(Val, BBFrom);
mergeAssumeBlockValueConstantRange(Val, InBlock, BBFrom->getTerminator());
// See note on the use of the CxtI with mergeAssumeBlockValueConstantRange,
// and caching, below.
mergeAssumeBlockValueConstantRange(Val, InBlock, CxtI);
if (!InBlock.isConstantRange())
return true;
ConstantRange Range =
Result.getConstantRange().intersectWith(InBlock.getConstantRange());
Result = LVILatticeVal::getRange(Range);
return true;
}
if (!hasBlockValue(Val, BBFrom)) {
if (pushBlockValue(std::make_pair(BBFrom, Val)))
return false;
Result.markOverdefined();
return true;
}
// If we couldn't compute the value on the edge, use the value from the BB.
Result = getBlockValue(Val, BBFrom);
mergeAssumeBlockValueConstantRange(Val, Result, BBFrom->getTerminator());
// We can use the context instruction (generically the ultimate instruction
// the calling pass is trying to simplify) here, even though the result of
// this function is generally cached when called from the solve* functions
// (and that cached result might be used with queries using a different
// context instruction), because when this function is called from the solve*
// functions, the context instruction is not provided. When called from
// LazyValueInfoCache::getValueOnEdge, the context instruction is provided,
// but then the result is not cached.
mergeAssumeBlockValueConstantRange(Val, Result, CxtI);
return true;
}
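// Worked example (hypothetical values): suppose the block analysis proved
// that %x lies in [0, 100) within BBFrom, and BBFrom ends in
// "br (icmp ult i32 %x, 10), %BBTo, %Other". getEdgeValueLocal yields the
// edge constraint [0, 10), which is intersected with the in-block range
// [0, 100), so %x is known to lie in [0, 10) on the edge BBFrom -> BBTo.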
LVILatticeVal LazyValueInfoCache::getValueInBlock(Value *V, BasicBlock *BB,
Instruction *CxtI) {
DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
<< BB->getName() << "'\n");
assert(BlockValueStack.empty() && BlockValueSet.empty());
pushBlockValue(std::make_pair(BB, V));
solve();
LVILatticeVal Result = getBlockValue(V, BB);
mergeAssumeBlockValueConstantRange(V, Result, CxtI);
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
LVILatticeVal LazyValueInfoCache::getValueAt(Value *V, Instruction *CxtI) {
DEBUG(dbgs() << "LVI Getting value " << *V << " at '"
<< CxtI->getName() << "'\n");
LVILatticeVal Result;
mergeAssumeBlockValueConstantRange(V, Result, CxtI);
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
LVILatticeVal LazyValueInfoCache::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI) {
DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
<< FromBB->getName() << "' to '" << ToBB->getName() << "'\n");
LVILatticeVal Result;
if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) {
solve();
bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result, CxtI);
(void)WasFastQuery;
assert(WasFastQuery && "More work to do after problem solved?");
}
DEBUG(dbgs() << " Result = " << Result << "\n");
return Result;
}
void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
BasicBlock *NewSucc) {
  // When an edge in the graph has been threaded, values that we previously
  // could not determine (i.e. that were marked overdefined) may now be
  // solvable. We do NOT try to proactively update these values. Instead, we
  // clear their entries from the cache and allow lazy updating to recompute
  // them when needed.
// The updating process is fairly simple: we need to drop cached info
// for all values that were marked overdefined in OldSucc, and for those same
// values in any successor of OldSucc (except NewSucc) in which they were
// also marked overdefined.
std::vector<BasicBlock*> worklist;
worklist.push_back(OldSucc);
DenseSet<Value*> ClearSet;
for (OverDefinedPairTy &P : OverDefinedCache)
if (P.first == OldSucc)
ClearSet.insert(P.second);
// Use a worklist to perform a depth-first search of OldSucc's successors.
// NOTE: We do not need a visited list since any blocks we have already
// visited will have had their overdefined markers cleared already, and we
// thus won't loop to their successors.
while (!worklist.empty()) {
BasicBlock *ToUpdate = worklist.back();
worklist.pop_back();
// Skip blocks only accessible through NewSucc.
if (ToUpdate == NewSucc) continue;
bool changed = false;
for (Value *V : ClearSet) {
// If a value was marked overdefined in OldSucc, and is here too...
DenseSet<OverDefinedPairTy>::iterator OI =
OverDefinedCache.find(std::make_pair(ToUpdate, V));
if (OI == OverDefinedCache.end()) continue;
// Remove it from the caches.
ValueCacheEntryTy &Entry = ValueCache[LVIValueHandle(V, this)];
ValueCacheEntryTy::iterator CI = Entry.find(ToUpdate);
assert(CI != Entry.end() && "Couldn't find entry to update?");
Entry.erase(CI);
OverDefinedCache.erase(OI);
// If we removed anything, then we potentially need to update
// blocks successors too.
changed = true;
}
if (!changed) continue;
worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
}
}
//===----------------------------------------------------------------------===//
// LazyValueInfo Impl
//===----------------------------------------------------------------------===//
/// This lazily constructs the LazyValueInfoCache.
static LazyValueInfoCache &getCache(void *&PImpl, AssumptionCache *AC,
const DataLayout *DL,
DominatorTree *DT = nullptr) {
if (!PImpl) {
assert(DL && "getCache() called with a null DataLayout");
PImpl = new LazyValueInfoCache(AC, *DL, DT);
}
return *static_cast<LazyValueInfoCache*>(PImpl);
}
bool LazyValueInfo::runOnFunction(Function &F) {
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
const DataLayout &DL = F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
if (PImpl)
getCache(PImpl, AC, &DL, DT).clear();
// Fully lazy.
return false;
}
void LazyValueInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
}
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
delete &getCache(PImpl, AC, nullptr);
PImpl = nullptr;
}
}
Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
Instruction *CxtI) {
const DataLayout &DL = BB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
ConstantRange CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
return nullptr;
}
/// Determine whether the specified value is known to be a
/// constant on the specified edge. Return null if not.
Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
BasicBlock *ToBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
ConstantRange CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
return nullptr;
}
static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
LVILatticeVal &Result,
const DataLayout &DL,
TargetLibraryInfo *TLI) {
// If we know the value is a constant, evaluate the conditional.
Constant *Res = nullptr;
if (Result.isConstant()) {
Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
return LazyValueInfo::Unknown;
}
if (Result.isConstantRange()) {
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return LazyValueInfo::Unknown;
ConstantRange CR = Result.getConstantRange();
if (Pred == ICmpInst::ICMP_EQ) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::False;
if (CR.isSingleElement() && CR.contains(CI->getValue()))
return LazyValueInfo::True;
} else if (Pred == ICmpInst::ICMP_NE) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::True;
if (CR.isSingleElement() && CR.contains(CI->getValue()))
return LazyValueInfo::False;
}
// Handle more complex predicates.
ConstantRange TrueValues =
ICmpInst::makeConstantRange((ICmpInst::Predicate)Pred, CI->getValue());
if (TrueValues.contains(CR))
return LazyValueInfo::True;
if (TrueValues.inverse().contains(CR))
return LazyValueInfo::False;
return LazyValueInfo::Unknown;
}
if (Result.isNotConstant()) {
// If this is an equality comparison, we can try to fold it knowing that
// "V != C1".
if (Pred == ICmpInst::ICMP_EQ) {
      // "V == C" folds to false exactly when C == C1, since we know V != C1.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return LazyValueInfo::False;
} else if (Pred == ICmpInst::ICMP_NE) {
      // "V != C" folds to true exactly when C == C1, since we know V != C1.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return LazyValueInfo::True;
}
return LazyValueInfo::Unknown;
}
return LazyValueInfo::Unknown;
}
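// Worked example (hypothetical range): if the lattice value for %v is the
// constant range [1, 10), then:
//   icmp eq %v, 0   -> False   (0 lies outside the range)
//   icmp ult %v, 10 -> True    ([1, 10) is contained in [0, 10))
//   icmp eq %v, 5   -> Unknown (5 is in the range, but so are other values)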
/// Determine whether the specified value comparison with a constant is known to
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
return getPredicateResult(Pred, C, Result, DL, TLI);
}
LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
const DataLayout &DL = CxtI->getModule()->getDataLayout();
LVILatticeVal Result = getCache(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
if (Ret != Unknown)
return Ret;
// TODO: Move this logic inside getValueAt so that it can be cached rather
// than re-queried on each call. This would also allow us to merge the
// underlying lattice values to get more information
if (CxtI) {
    // For a comparison where V is defined outside this block, it's possible
// that we've branched on it before. Look to see if the value is known
// on all incoming edges.
BasicBlock *BB = CxtI->getParent();
pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
if (PI != PE &&
(!isa<Instruction>(V) ||
cast<Instruction>(V)->getParent() != BB)) {
// For predecessor edge, determine if the comparison is true or false
// on that edge. If they're all true or all false, we can conclude
// the value of the comparison in this block.
Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
if (Baseline != Unknown) {
// Check that all remaining incoming values match the first one.
while (++PI != PE) {
Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
if (Ret != Baseline) break;
}
        // If we scanned all predecessors without terminating early, every
        // edge agreed with the baseline.
if (PI == PE) {
return Baseline;
}
}
}
}
return Unknown;
}
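// Sketch of the incoming-edge fallback (hypothetical blocks and values): for
// "%c = icmp eq i32 %v, 0" in BB, where %v is defined in another block, if
// every predecessor ends in "br (icmp ne i32 %v, 0), %BB, ...", then each
// getPredicateOnEdge query returns False, all edges agree with the Baseline,
// and the comparison folds to False in BB even though no block value for %v
// was ever computed.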
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
BasicBlock *NewSucc) {
if (PImpl) {
const DataLayout &DL = PredBB->getModule()->getDataLayout();
getCache(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
}
}
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
if (PImpl) {
const DataLayout &DL = BB->getModule()->getDataLayout();
getCache(PImpl, AC, &DL, DT).eraseBlock(BB);
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/DominanceFrontier.cpp | //===- DominanceFrontier.cpp - Dominance Frontier Calculation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Analysis/DominanceFrontierImpl.h"
using namespace llvm;
namespace llvm {
template class DominanceFrontierBase<BasicBlock>;
template class ForwardDominanceFrontierBase<BasicBlock>;
}
char DominanceFrontier::ID = 0;
INITIALIZE_PASS_BEGIN(DominanceFrontier, "domfrontier",
"Dominance Frontier Construction", true, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(DominanceFrontier, "domfrontier",
"Dominance Frontier Construction", true, true)
DominanceFrontier::DominanceFrontier()
: FunctionPass(ID),
Base() {
initializeDominanceFrontierPass(*PassRegistry::getPassRegistry());
}
void DominanceFrontier::releaseMemory() {
Base.releaseMemory();
}
bool DominanceFrontier::runOnFunction(Function &) {
releaseMemory();
Base.analyze(getAnalysis<DominatorTreeWrapperPass>().getDomTree());
return false;
}
void DominanceFrontier::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<DominatorTreeWrapperPass>();
}
void DominanceFrontier::print(raw_ostream &OS, const Module *) const {
Base.print(OS);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void DominanceFrontier::dump() const {
print(dbgs());
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/BlockFrequencyInfo.cpp | //===- BlockFrequencyInfo.cpp - Block Frequency Analysis ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/CFG.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GraphWriter.h"
using namespace llvm;
#define DEBUG_TYPE "block-freq"
#ifndef NDEBUG
enum GVDAGType {
GVDT_None,
GVDT_Fraction,
GVDT_Integer
};
static cl::opt<GVDAGType>
ViewBlockFreqPropagationDAG("view-block-freq-propagation-dags", cl::Hidden,
cl::desc("Pop up a window to show a dag displaying how block "
"frequencies propagation through the CFG."),
cl::values(
clEnumValN(GVDT_None, "none",
"do not display graphs."),
clEnumValN(GVDT_Fraction, "fraction", "display a graph using the "
"fractional block frequency representation."),
clEnumValN(GVDT_Integer, "integer", "display a graph using the raw "
"integer fractional block frequency representation."),
clEnumValEnd));
namespace llvm {
template <>
struct GraphTraits<BlockFrequencyInfo *> {
typedef const BasicBlock NodeType;
typedef succ_const_iterator ChildIteratorType;
typedef Function::const_iterator nodes_iterator;
static inline const NodeType *getEntryNode(const BlockFrequencyInfo *G) {
return G->getFunction()->begin();
}
static ChildIteratorType child_begin(const NodeType *N) {
return succ_begin(N);
}
static ChildIteratorType child_end(const NodeType *N) {
return succ_end(N);
}
static nodes_iterator nodes_begin(const BlockFrequencyInfo *G) {
return G->getFunction()->begin();
}
static nodes_iterator nodes_end(const BlockFrequencyInfo *G) {
return G->getFunction()->end();
}
};
template<>
struct DOTGraphTraits<BlockFrequencyInfo*> : public DefaultDOTGraphTraits {
explicit DOTGraphTraits(bool isSimple=false) :
DefaultDOTGraphTraits(isSimple) {}
static std::string getGraphName(const BlockFrequencyInfo *G) {
return G->getFunction()->getName();
}
std::string getNodeLabel(const BasicBlock *Node,
const BlockFrequencyInfo *Graph) {
std::string Result;
raw_string_ostream OS(Result);
OS << Node->getName() << ":";
switch (ViewBlockFreqPropagationDAG) {
case GVDT_Fraction:
Graph->printBlockFreq(OS, Node);
break;
case GVDT_Integer:
OS << Graph->getBlockFreq(Node).getFrequency();
break;
case GVDT_None:
llvm_unreachable("If we are not supposed to render a graph we should "
"never reach this point.");
}
return Result;
}
};
} // end namespace llvm
#endif
INITIALIZE_PASS_BEGIN(BlockFrequencyInfo, "block-freq",
"Block Frequency Analysis", true, true)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfo)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(BlockFrequencyInfo, "block-freq",
"Block Frequency Analysis", true, true)
char BlockFrequencyInfo::ID = 0;
BlockFrequencyInfo::BlockFrequencyInfo() : FunctionPass(ID) {
initializeBlockFrequencyInfoPass(*PassRegistry::getPassRegistry());
}
BlockFrequencyInfo::~BlockFrequencyInfo() {}
void BlockFrequencyInfo::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<BranchProbabilityInfo>();
AU.addRequired<LoopInfoWrapperPass>();
AU.setPreservesAll();
}
bool BlockFrequencyInfo::runOnFunction(Function &F) {
BranchProbabilityInfo &BPI = getAnalysis<BranchProbabilityInfo>();
LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
if (!BFI)
BFI.reset(new ImplType);
BFI->doFunction(&F, &BPI, &LI);
#ifndef NDEBUG
if (ViewBlockFreqPropagationDAG != GVDT_None)
view();
#endif
return false;
}
void BlockFrequencyInfo::releaseMemory() { BFI.reset(); }
void BlockFrequencyInfo::print(raw_ostream &O, const Module *) const {
if (BFI) BFI->print(O);
}
BlockFrequency BlockFrequencyInfo::getBlockFreq(const BasicBlock *BB) const {
return BFI ? BFI->getBlockFreq(BB) : 0;
}
/// Pop up a ghostview window with the current block frequency propagation
/// rendered using dot.
void BlockFrequencyInfo::view() const {
// This code is only for debugging.
#ifndef NDEBUG
ViewGraph(const_cast<BlockFrequencyInfo *>(this), "BlockFrequencyDAGs");
#else
errs() << "BlockFrequencyInfo::view is only available in debug builds on "
"systems with Graphviz or gv!\n";
#endif // NDEBUG
}
const Function *BlockFrequencyInfo::getFunction() const {
return BFI ? BFI->getFunction() : nullptr;
}
raw_ostream &BlockFrequencyInfo::
printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const {
return BFI ? BFI->printBlockFreq(OS, Freq) : OS;
}
raw_ostream &
BlockFrequencyInfo::printBlockFreq(raw_ostream &OS,
const BasicBlock *BB) const {
return BFI ? BFI->printBlockFreq(OS, BB) : OS;
}
uint64_t BlockFrequencyInfo::getEntryFreq() const {
return BFI ? BFI->getEntryFreq() : 0;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/InstructionSimplify.cpp | //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include <algorithm>
#include "llvm/Analysis/DxilSimplify.h" // HLSL Change - simplify dxil call.
using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE "instsimplify"
enum { RecursionLimit = 3 };
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");
namespace {
struct Query {
const DataLayout &DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
AssumptionCache *AC;
const Instruction *CxtI;
Query(const DataLayout &DL, const TargetLibraryInfo *tli,
const DominatorTree *dt, AssumptionCache *ac = nullptr,
const Instruction *cxti = nullptr)
: DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
};
} // end anonymous namespace
static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &,
unsigned);
static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
const Query &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &,
unsigned);
static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyTruncInst(Value *, Type *, const Query &, unsigned);
/// getFalse - For a boolean type, or a vector of boolean type, return false, or
/// a vector with every element false, as appropriate for the type.
static Constant *getFalse(Type *Ty) {
assert(Ty->getScalarType()->isIntegerTy(1) &&
"Expected i1 type or a vector of i1!");
return Constant::getNullValue(Ty);
}
/// getTrue - For a boolean type, or a vector of boolean type, return true, or
/// a vector with every element true, as appropriate for the type.
static Constant *getTrue(Type *Ty) {
assert(Ty->getScalarType()->isIntegerTy(1) &&
"Expected i1 type or a vector of i1!");
return Constant::getAllOnesValue(Ty);
}
/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
Value *RHS) {
CmpInst *Cmp = dyn_cast<CmpInst>(V);
if (!Cmp)
return false;
CmpInst::Predicate CPred = Cmp->getPredicate();
Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
if (CPred == Pred && CLHS == LHS && CRHS == RHS)
return true;
return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
CRHS == LHS;
}
/// ValueDominatesPHI - Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I)
// Arguments and constants dominate all instructions.
return true;
// If we are processing instructions (and/or basic blocks) that have not been
// fully added to a function, the parent nodes may still be null. Simply
// return the conservative answer in these cases.
if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
return false;
// If we have a DominatorTree then do a precise test.
if (DT) {
if (!DT->isReachableFromEntry(P->getParent()))
return true;
if (!DT->isReachableFromEntry(I->getParent()))
return false;
return DT->dominates(I, P);
}
// Otherwise, if the instruction is in the entry block, and is not an invoke,
// then it obviously dominates all phi nodes.
if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
!isa<InvokeInst>(I))
return true;
return false;
}
/// ExpandBinOp - Simplify "A op (B op' C)" by distributing op over op', turning
/// it into "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
unsigned OpcToExpand, const Query &Q,
unsigned MaxRecurse) {
Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
// Check whether the expression has the form "(A op' B) op C".
if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
if (Op0->getOpcode() == OpcodeToExpand) {
// It does! Try turning it into "(A op C) op' (B op C)".
Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
// Do "A op C" and "B op C" both simplify?
if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
// They do! Return "L op' R" if it simplifies or is already available.
// If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand)
&& L == B && R == A)) {
++NumExpand;
return LHS;
}
// Otherwise return "L op' R" if it simplifies.
if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
++NumExpand;
return V;
}
}
}
// Check whether the expression has the form "A op (B op' C)".
if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
if (Op1->getOpcode() == OpcodeToExpand) {
// It does! Try turning it into "(A op B) op' (A op C)".
Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
// Do "A op B" and "A op C" both simplify?
if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
// They do! Return "L op' R" if it simplifies or is already available.
// If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand)
&& L == C && R == B)) {
++NumExpand;
return RHS;
}
// Otherwise return "L op' R" if it simplifies.
if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
++NumExpand;
return V;
}
}
}
return nullptr;
}
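// Schematic illustration (not tied to any specific IR): with Opcode = And
// and OpcodeToExpand = Or, "(A | B) & C" is tentatively rewritten as
// "(A & C) | (B & C)". The rewrite is kept only when both distributed halves
// simplify on their own; if the re-formed Or then collapses back to "A | B",
// the original LHS is returned unchanged, so no new instructions are ever
// created.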
/// SimplifyAssociativeBinOp - Generic simplifications for associative binary
/// operations. Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc;
assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
// Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
if (Op0 && Op0->getOpcode() == Opcode) {
Value *A = Op0->getOperand(0);
Value *B = Op0->getOperand(1);
Value *C = RHS;
// Does "B op C" simplify?
if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
// It does! Return "A op V" if it simplifies or is already available.
// If V equals B then "A op V" is just the LHS.
if (V == B) return LHS;
// Otherwise return "A op V" if it simplifies.
if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
}
}
// Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
if (Op1 && Op1->getOpcode() == Opcode) {
Value *A = LHS;
Value *B = Op1->getOperand(0);
Value *C = Op1->getOperand(1);
// Does "A op B" simplify?
if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
// It does! Return "V op C" if it simplifies or is already available.
// If V equals B then "V op C" is just the RHS.
if (V == B) return RHS;
// Otherwise return "V op C" if it simplifies.
if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
}
}
// The remaining transforms require commutativity as well as associativity.
if (!Instruction::isCommutative(Opcode))
return nullptr;
// Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
if (Op0 && Op0->getOpcode() == Opcode) {
Value *A = Op0->getOperand(0);
Value *B = Op0->getOperand(1);
Value *C = RHS;
// Does "C op A" simplify?
if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
// It does! Return "V op B" if it simplifies or is already available.
// If V equals A then "V op B" is just the LHS.
if (V == A) return LHS;
// Otherwise return "V op B" if it simplifies.
if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
}
}
// Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
if (Op1 && Op1->getOpcode() == Opcode) {
Value *A = LHS;
Value *B = Op1->getOperand(0);
Value *C = Op1->getOperand(1);
// Does "C op A" simplify?
if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
// It does! Return "B op V" if it simplifies or is already available.
// If V equals C then "B op V" is just the RHS.
if (V == C) return RHS;
// Otherwise return "B op V" if it simplifies.
if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
++NumReassoc;
return W;
}
}
}
return nullptr;
}
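// Worked example of the first transform (hypothetical i32 value %x): for
// "(%x + 1) + -1", taking A = %x, B = 1, C = -1, "B op C" constant-folds to
// 0, and "A op 0" simplifies to %x, so the whole expression folds to %x
// without creating any new instructions.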
/// ThreadBinOpOverSelect - In the case of a binary operation with a select
/// instruction as an operand, try to simplify the binop by seeing whether
/// evaluating it on both branches of the select results in the same value.
/// Returns the common value if so, otherwise returns null.
static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
SelectInst *SI;
if (isa<SelectInst>(LHS)) {
SI = cast<SelectInst>(LHS);
} else {
assert(isa<SelectInst>(RHS) && "No select instruction operand!");
SI = cast<SelectInst>(RHS);
}
// Evaluate the BinOp on the true and false branches of the select.
Value *TV;
Value *FV;
if (SI == LHS) {
TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
} else {
TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
}
// If they simplified to the same value, then return the common value.
// If they both failed to simplify then return null.
if (TV == FV)
return TV;
// If one branch simplified to undef, return the other one.
if (TV && isa<UndefValue>(TV))
return FV;
if (FV && isa<UndefValue>(FV))
return TV;
// If applying the operation did not change the true and false select values,
// then the result of the binop is the select itself.
if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
return SI;
// If one branch simplified and the other did not, and the simplified
// value is equal to the unsimplified one, return the simplified value.
// For example, select (cond, X, X & Z) & Z -> X & Z.
if ((FV && !TV) || (TV && !FV)) {
// Check that the simplified value has the form "X op Y" where "op" is the
// same as the original operation.
Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
if (Simplified && Simplified->getOpcode() == Opcode) {
// The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
// We already know that "op" is the same as for the simplified value. See
// if the operands match too. If so, return the simplified value.
Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
if (Simplified->getOperand(0) == UnsimplifiedLHS &&
Simplified->getOperand(1) == UnsimplifiedRHS)
return Simplified;
if (Simplified->isCommutative() &&
Simplified->getOperand(1) == UnsimplifiedLHS &&
Simplified->getOperand(0) == UnsimplifiedRHS)
return Simplified;
}
}
return nullptr;
}
/// ThreadCmpOverSelect - In the case of a comparison with a select instruction,
/// try to simplify the comparison by seeing whether both branches of the select
/// result in the same value. Returns the common value if so, otherwise returns
/// null.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
Value *RHS, const Query &Q,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
// Make sure the select is on the LHS.
if (!isa<SelectInst>(LHS)) {
std::swap(LHS, RHS);
Pred = CmpInst::getSwappedPredicate(Pred);
}
assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
SelectInst *SI = cast<SelectInst>(LHS);
Value *Cond = SI->getCondition();
Value *TV = SI->getTrueValue();
Value *FV = SI->getFalseValue();
// Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
// Does "cmp TV, RHS" simplify?
Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
if (TCmp == Cond) {
// It not only simplified, it simplified to the select condition. Replace
// it with 'true'.
TCmp = getTrue(Cond->getType());
} else if (!TCmp) {
// It didn't simplify. However if "cmp TV, RHS" is equal to the select
// condition then we can replace it with 'true'. Otherwise give up.
if (!isSameCompare(Cond, Pred, TV, RHS))
return nullptr;
TCmp = getTrue(Cond->getType());
}
// Does "cmp FV, RHS" simplify?
Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
if (FCmp == Cond) {
// It not only simplified, it simplified to the select condition. Replace
// it with 'false'.
FCmp = getFalse(Cond->getType());
} else if (!FCmp) {
// It didn't simplify. However if "cmp FV, RHS" is equal to the select
// condition then we can replace it with 'false'. Otherwise give up.
if (!isSameCompare(Cond, Pred, FV, RHS))
return nullptr;
FCmp = getFalse(Cond->getType());
}
// If both sides simplified to the same value, then use it as the result of
// the original comparison.
if (TCmp == FCmp)
return TCmp;
// The remaining cases only make sense if the select condition has the same
// type as the result of the comparison, so bail out if this is not so.
if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
return nullptr;
// If the false value simplified to false, then the result of the compare
// is equal to "Cond && TCmp". This also catches the case when the false
// value simplified to false and the true value to true, returning "Cond".
if (match(FCmp, m_Zero()))
if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
return V;
// If the true value simplified to true, then the result of the compare
// is equal to "Cond || FCmp".
if (match(TCmp, m_One()))
if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
return V;
// Finally, if the false value simplified to true and the true value to
// false, then the result of the compare is equal to "!Cond".
if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
if (Value *V =
SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
Q, MaxRecurse))
return V;
return nullptr;
}
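// Worked example (hypothetical i32 select): for
//   icmp ult (select i1 %c, i32 3, i32 5), 10
// both "icmp ult 3, 10" and "icmp ult 5, 10" constant-fold to true, so
// TCmp == FCmp and the comparison simplifies to true regardless of %c.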
/// ThreadBinOpOverPHI - In the case of a binary operation with an operand that
/// is a PHI instruction, try to simplify the binop by seeing whether evaluating
/// it on the incoming phi values yields the same result for every value. If so
/// returns the common value, otherwise returns null.
static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
PHINode *PI;
if (isa<PHINode>(LHS)) {
PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
return nullptr;
} else {
assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
PI = cast<PHINode>(RHS);
// Bail out if LHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(LHS, PI, Q.DT))
return nullptr;
}
// Evaluate the BinOp on the incoming phi values.
Value *CommonValue = nullptr;
for (Value *Incoming : PI->incoming_values()) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
Value *V = PI == LHS ?
SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the one seen previously, then give up.
if (!V || (CommonValue && V != CommonValue))
return nullptr;
CommonValue = V;
}
return CommonValue;
}
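// Worked example (hypothetical phi): for
//   %p = phi i32 [ 8, %A ], [ 16, %B ]
//   %r = urem i32 %p, 8
// evaluating the urem on each incoming value gives 0 both times, so the
// common value 0 is returned and %r folds away.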
/// ThreadCmpOverPHI - In the case of a comparison with a PHI instruction, try
/// to simplify the comparison by seeing whether comparing with all of the
/// incoming phi values yields the same result every time. If so returns the
/// common result, otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
// Make sure the phi is on the LHS.
if (!isa<PHINode>(LHS)) {
std::swap(LHS, RHS);
Pred = CmpInst::getSwappedPredicate(Pred);
}
assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
PHINode *PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
return nullptr;
  // Evaluate the comparison on the incoming phi values.
Value *CommonValue = nullptr;
for (Value *Incoming : PI->incoming_values()) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PI) continue;
Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the one seen previously, then give up.
if (!V || (CommonValue && V != CommonValue))
return nullptr;
CommonValue = V;
}
return CommonValue;
}
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const Query &Q, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(), Ops,
Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// X + undef -> undef
if (match(Op1, m_Undef()))
return Op1;
// X + 0 -> X
if (match(Op1, m_Zero()))
return Op0;
// X + (Y - X) -> Y
// (Y - X) + X -> Y
// Eg: X + -X -> 0
Value *Y = nullptr;
if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
return Y;
// X + ~X -> -1 since ~X = -X-1
if (match(Op0, m_Not(m_Specific(Op1))) ||
match(Op1, m_Not(m_Specific(Op0))))
return Constant::getAllOnesValue(Op0->getType());
  // i1 add -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
MaxRecurse))
return V;
// Threading Add over selects and phi nodes is pointless, so don't bother.
// Threading over the select in "A + select(cond, B, C)" means evaluating
// "A+B" and "A+C" and seeing if they are equal; but they are equal if and
// only if B and C are equal. If B and C are equal then (since we assume
// that operands have already been simplified) "select(cond, B, C)" should
// have been simplified to the common value of B and C already. Analysing
// "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
return nullptr;
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
bool AllowNonInbounds = false) {
assert(V->getType()->getScalarType()->isPointerTy());
Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
SmallPtrSet<Value *, 4> Visited;
Visited.insert(V);
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if ((!AllowNonInbounds && !GEP->isInBounds()) ||
!GEP->accumulateConstantOffset(DL, Offset))
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
V = cast<Operator>(V)->getOperand(0);
} else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
if (GA->mayBeOverridden())
break;
V = GA->getAliasee();
} else {
break;
}
assert(V->getType()->getScalarType()->isPointerTy() &&
"Unexpected operand type!");
} while (Visited.insert(V).second);
Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
if (V->getType()->isVectorTy())
return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
OffsetIntPtr);
return OffsetIntPtr;
}
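// Worked example (hypothetical IR, assuming 64-bit pointers and 4-byte i32):
//   %p1 = getelementptr inbounds i32, i32* %base, i64 3
//   %p2 = bitcast i32* %p1 to i8*
// Stripping %p2 walks through the bitcast and the inbounds GEP, leaving
// V = %base and returning the accumulated byte offset 12 as an IntPtrTy
// constant.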
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
// If LHS and RHS are not related via constant offsets to the same base
// value, there is nothing we can do here.
if (LHS != RHS)
return nullptr;
// Otherwise, the difference of LHS - RHS can be computed as:
// LHS - RHS
// = (LHSOffset + Base) - (RHSOffset + Base)
// = LHSOffset - RHSOffset
return ConstantExpr::getSub(LHSOffset, RHSOffset);
}
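// Worked example (hypothetical IR, assuming 4-byte i32): for
//   sub (ptrtoint (gep inbounds i32* %p, i64 2)),
//       (ptrtoint (gep inbounds i32* %p, i64 1))
// both operands strip down to the shared base %p with byte offsets 8 and 4,
// so the difference folds to the constant 4.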
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const Query &Q, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0))
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// X - undef -> undef
// undef - X -> undef
if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
return UndefValue::get(Op0->getType());
// X - 0 -> X
if (match(Op1, m_Zero()))
return Op0;
// X - X -> 0
if (Op0 == Op1)
return Constant::getNullValue(Op0->getType());
// 0 - X -> 0 if the sub is NUW.
if (isNUW && match(Op0, m_Zero()))
return Op0;
// (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
// For example, (X + Y) - Y -> X; (Y + X) - Y -> X
Value *X = nullptr, *Y = nullptr, *Z = Op1;
if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
// See if "V === Y - Z" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
// It does! Now see if "X + V" simplifies.
if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// See if "V === X - Z" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
// It does! Now see if "Y + V" simplifies.
if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
}
// X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
// For example, X - (X + 1) -> -1
X = Op0;
if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
// See if "V === X - Y" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
// It does! Now see if "V - Z" simplifies.
if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// See if "V === X - Z" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
// It does! Now see if "V - Y" simplifies.
if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
}
// Z - (X - Y) -> (Z - X) + Y if everything simplifies.
// For example, X - (X - Y) -> Y.
Z = Op0;
if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
// See if "V === Z - X" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
// It does! Now see if "V + Y" simplifies.
if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
// It does, we successfully reassociated!
++NumReassoc;
return W;
}
// trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
match(Op1, m_Trunc(m_Value(Y))))
if (X->getType() == Y->getType())
// See if "V === X - Y" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
// It does! Now see if "trunc V" simplifies.
if (Value *W = SimplifyTruncInst(V, Op0->getType(), Q, MaxRecurse-1))
// It does, return the simplified "trunc V".
return W;
// Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
if (match(Op0, m_PtrToInt(m_Value(X))) &&
match(Op1, m_PtrToInt(m_Value(Y))))
if (Constant *Result = computePointerDifference(Q.DL, X, Y))
return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
// i1 sub -> xor.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
return V;
// Threading Sub over selects and phi nodes is pointless, so don't bother.
// Threading over the select in "A - select(cond, B, C)" means evaluating
// "A-B" and "A-C" and seeing if they are equal; but they are equal if and
// only if B and C are equal. If B and C are equal then (since we assume
// that operands have already been simplified) "select(cond, B, C)" should
// have been simplified to the common value of B and C already. Analysing
// "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
return nullptr;
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const Query &Q, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FAdd, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// HLSL Change Begins.
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op0))
if (FP->getValueAPF().isNaN())
return Op0;
// HLSL Change Ends.
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// HLSL Change Begins.
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op0))
if (FP->getValueAPF().isNaN())
return Op0;
// HLSL Change Ends.
// fadd X, -0 ==> X
if (match(Op1, m_NegZero()))
return Op0;
// fadd X, 0 ==> X, when we know X is not -0
if (match(Op1, m_Zero()) &&
(FMF.noSignedZeros() || CannotBeNegativeZero(Op0)))
return Op0;
// fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
// where nnan and ninf have to occur at least once somewhere in this
// expression
Value *SubOp = nullptr;
if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
SubOp = Op1;
else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
SubOp = Op0;
if (SubOp) {
Instruction *FSub = cast<Instruction>(SubOp);
if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
(FMF.noInfs() || FSub->hasNoInfs()))
return Constant::getNullValue(Op0->getType());
}
return nullptr;
}
/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const Query &Q, unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FSub, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// HLSL Change Begins.
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op0))
if (FP->getValueAPF().isNaN())
return Op0;
// HLSL Change Ends.
}
// HLSL Change Begins.
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op1))
if (FP->getValueAPF().isNaN())
return Op1;
// HLSL Change Ends.
// fsub X, 0 ==> X
if (match(Op1, m_Zero()))
return Op0;
// fsub X, -0 ==> X, when we know X is not -0
if (match(Op1, m_NegZero()) &&
(FMF.noSignedZeros() || CannotBeNegativeZero(Op0)))
return Op0;
// fsub 0, (fsub -0.0, X) ==> X
Value *X;
if (match(Op0, m_AnyZero())) {
if (match(Op1, m_FSub(m_NegZero(), m_Value(X))))
return X;
if (FMF.noSignedZeros() && match(Op1, m_FSub(m_AnyZero(), m_Value(X))))
return X;
}
// fsub nnan x, x ==> 0.0
if (FMF.noNaNs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
return nullptr;
}
/// Given the operands for an FMul, see if we can fold the result
static Value *SimplifyFMulInst(Value *Op0, Value *Op1,
FastMathFlags FMF,
const Query &Q,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = {CLHS, CRHS};
return ConstantFoldInstOperands(Instruction::FMul, CLHS->getType(), Ops,
Q.DL, Q.TLI);
}
// HLSL Change Begins.
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op0))
if (FP->getValueAPF().isNaN())
return Op0;
// HLSL Change Ends.
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// HLSL Change Begins.
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op0))
if (FP->getValueAPF().isNaN())
return Op0;
// HLSL Change Ends.
// fmul X, 1.0 ==> X
if (match(Op1, m_FPOne()))
return Op0;
// fmul nnan nsz X, 0 ==> 0
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
return Op1;
return nullptr;
}
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// X * undef -> 0
if (match(Op1, m_Undef()))
return Constant::getNullValue(Op0->getType());
// X * 0 -> 0
if (match(Op1, m_Zero()))
return Op1;
// X * 1 -> X
if (match(Op1, m_One()))
return Op0;
// (X / Y) * Y -> X if the division is exact.
Value *X = nullptr;
if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
return X;
// i1 mul -> and.
if (MaxRecurse && Op0->getType()->isIntegerTy(1))
if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
MaxRecurse))
return V;
// Mul distributes over Add. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
Q, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFAddInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFSubInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFMulInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyMulInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
const Query &Q, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
bool isSigned = Opcode == Instruction::SDiv;
// X / undef -> undef
if (match(Op1, m_Undef()))
return Op1;
// X / 0 -> undef, we don't need to preserve faults!
if (match(Op1, m_Zero()))
return UndefValue::get(Op1->getType());
// undef / X -> 0
if (match(Op0, m_Undef()))
return Constant::getNullValue(Op0->getType());
// 0 / X -> 0, we don't need to preserve faults!
if (match(Op0, m_Zero()))
return Op0;
// X / 1 -> X
if (match(Op1, m_One()))
return Op0;
if (Op0->getType()->isIntegerTy(1))
// It can't be division by zero, hence it must be division by one.
return Op0;
// X / X -> 1
if (Op0 == Op1)
return ConstantInt::get(Op0->getType(), 1);
// (X * Y) / Y -> X if the multiplication does not overflow.
Value *X = nullptr, *Y = nullptr;
if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
// If the Mul knows it does not overflow, then we are good to go.
if ((isSigned && Mul->hasNoSignedWrap()) ||
(!isSigned && Mul->hasNoUnsignedWrap()))
return X;
// If X has the form X = A / Y then X * Y cannot overflow.
if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
return X;
}
// (X rem Y) / Y -> 0
if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
(!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
return Constant::getNullValue(Op0->getType());
// (X /u C1) /u C2 -> 0 if C1 * C2 overflow
ConstantInt *C1, *C2;
if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
match(Op1, m_ConstantInt(C2))) {
bool Overflow;
C1->getValue().umul_ov(C2->getValue(), Overflow);
if (Overflow)
return Constant::getNullValue(Op0->getType());
}
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
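// Worked example of the overflow rule above (hypothetical i32 value %x):
//   udiv (udiv i32 %x, 65536), 65536
// Since 65536 * 65536 = 2^32 does not fit in an i32, the combined divisor
// exceeds any possible value of %x, so the expression folds to 0.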
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyUDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const Query &Q, unsigned) {
// HLSL Change Begins.
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = {C0, C1};
return ConstantFoldInstOperands(Instruction::FDiv, C0->getType(), Ops,
Q.DL, Q.TLI);
}
if (ConstantFP *FP = dyn_cast<ConstantFP>(C0))
if (FP->getValueAPF().isNaN())
return Op0;
}
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op1))
if (FP->getValueAPF().isNaN())
return Op1;
// HLSL Change Ends.
// undef / X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
// X / undef -> undef
if (match(Op1, m_Undef()))
return Op1;
// 0 / X -> 0
// Requires that NaNs are off (X could be zero) and signed zeroes are
// ignored (X could be positive or negative, so the output sign is unknown).
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
return Op0;
if (FMF.noNaNs()) {
// X / X -> 1.0 is legal when NaNs are ignored.
if (Op0 == Op1)
return ConstantFP::get(Op0->getType(), 1.0);
// -X / X -> -1.0 and
// X / -X -> -1.0 are legal when NaNs are ignored.
// We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
BinaryOperator::getFNegArgument(Op0) == Op1) ||
(BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
BinaryOperator::getFNegArgument(Op1) == Op0))
return ConstantFP::get(Op0->getType(), -1.0);
}
return nullptr;
}
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyRem - Given operands for an SRem or URem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
const Query &Q, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
// X % undef -> undef
if (match(Op1, m_Undef()))
return Op1;
// undef % X -> 0
if (match(Op0, m_Undef()))
return Constant::getNullValue(Op0->getType());
// 0 % X -> 0, we don't need to preserve faults!
if (match(Op0, m_Zero()))
return Op0;
// X % 0 -> undef, we don't need to preserve faults!
if (match(Op1, m_Zero()))
return UndefValue::get(Op0->getType());
// X % 1 -> 0
if (match(Op1, m_One()))
return Constant::getNullValue(Op0->getType());
if (Op0->getType()->isIntegerTy(1))
// It can't be remainder by zero, hence it must be remainder by one.
return Constant::getNullValue(Op0->getType());
// X % X -> 0
if (Op0 == Op1)
return Constant::getNullValue(Op0->getType());
// (X % Y) % Y -> X % Y
if ((Opcode == Instruction::SRem &&
match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
(Opcode == Instruction::URem &&
match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
return Op0;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySRemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyURemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const Query &Q, unsigned) {
// HLSL Change Begins.
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = {C0, C1};
return ConstantFoldInstOperands(Instruction::FRem, C0->getType(), Ops,
Q.DL, Q.TLI);
}
if (ConstantFP *FP = dyn_cast<ConstantFP>(C0))
if (FP->getValueAPF().isNaN())
return Op0;
}
if (ConstantFP *FP = dyn_cast<ConstantFP>(Op1))
if (FP->getValueAPF().isNaN())
return Op1;
// HLSL Change Ends.
// undef % X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
// X % undef -> undef
if (match(Op1, m_Undef()))
return Op1;
// 0 % X -> 0
// Requires that NaNs are off (X could be zero) and signed zeroes are
// ignored (X could be positive or negative, so the output sign is unknown).
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
return Op0;
return nullptr;
}
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// isUndefShift - Returns true if a shift by \c Amount always yields undef.
static bool isUndefShift(Value *Amount) {
Constant *C = dyn_cast<Constant>(Amount);
if (!C)
return false;
// X shift by undef -> undef because it may shift by the bitwidth.
if (isa<UndefValue>(C))
return true;
// Shifting by the bitwidth or more is undefined.
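  // (e.g. for an i32 value, a shift amount of 32 or more yields undef)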
if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
if (CI->getValue().getLimitedValue() >=
CI->getType()->getScalarSizeInBits())
return true;
// If all lanes of a vector shift are undefined the whole shift is.
if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
if (!isUndefShift(C->getAggregateElement(I)))
return false;
return true;
}
return false;
}
/// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
const Query &Q, unsigned MaxRecurse) {
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
// 0 shift by X -> 0
if (match(Op0, m_Zero()))
return Op0;
// X shift by 0 -> X
if (match(Op1, m_Zero()))
return Op0;
// Fold undefined shifts.
if (isUndefShift(Op1))
return UndefValue::get(Op0->getType());
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
/// \brief Given operands for an Shl, LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRightShift(unsigned Opcode, Value *Op0, Value *Op1,
bool isExact, const Query &Q,
unsigned MaxRecurse) {
if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
// X >> X -> 0
if (Op0 == Op1)
return Constant::getNullValue(Op0->getType());
// undef >> X -> 0
// undef >> X -> undef (if it's exact)
if (match(Op0, m_Undef()))
return isExact ? Op0 : Constant::getNullValue(Op0->getType());
// The low bit cannot be shifted out of an exact shift if it is set.
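  // (If the low bit of Op0 is known to be one, any nonzero exact shift would
  // shift that bit out, which 'exact' forbids; the shift amount must
  // therefore be zero, and the result is Op0 itself.)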
if (isExact) {
unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
APInt Op0KnownZero(BitWidth, 0);
APInt Op0KnownOne(BitWidth, 0);
computeKnownBits(Op0, Op0KnownZero, Op0KnownOne, Q.DL, /*Depth=*/0, Q.AC,
Q.CxtI, Q.DT);
if (Op0KnownOne[0])
return Op0;
}
return nullptr;
}
/// SimplifyShlInst - Given operands for an Shl, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const Query &Q, unsigned MaxRecurse) {
if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
return V;
// undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
if (match(Op0, m_Undef()))
return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
// (X >> A) << A -> X
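  // The 'exact' flag guarantees no one bits were shifted out, so shifting
  // left by the same amount reconstructs X exactly.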
Value *X;
if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
return nullptr;
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyLShrInst - Given operands for an LShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const Query &Q, unsigned MaxRecurse) {
if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
MaxRecurse))
return V;
// (X << A) >> A -> X
Value *X;
if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
return X;
return nullptr;
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyLShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyAShrInst - Given operands for an AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const Query &Q, unsigned MaxRecurse) {
if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
MaxRecurse))
return V;
// all ones >>a X -> all ones
if (match(Op0, m_AllOnes()))
return Op0;
// (X << A) >> A -> X
Value *X;
if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
return X;
// Arithmetic shifting an all-sign-bit value is a no-op.
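  // (e.g. "ashr i8 %x, %n" where %x is known to be 0 or -1 simplifies to %x)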
unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (NumSignBits == Op0->getType()->getScalarSizeInBits())
return Op0;
return nullptr;
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyAShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
ICmpInst *UnsignedICmp, bool IsAnd) {
Value *X, *Y;
ICmpInst::Predicate EqPred;
if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
!ICmpInst::isEquality(EqPred))
return nullptr;
ICmpInst::Predicate UnsignedPred;
if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
ICmpInst::isUnsigned(UnsignedPred))
;
else if (match(UnsignedICmp,
m_ICmp(UnsignedPred, m_Value(Y), m_Specific(X))) &&
ICmpInst::isUnsigned(UnsignedPred))
UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
else
return nullptr;
// X < Y && Y != 0 --> X < Y
// X < Y || Y != 0 --> Y != 0
if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
return IsAnd ? UnsignedICmp : ZeroICmp;
// X >= Y || Y != 0 --> true
// X >= Y || Y == 0 --> X >= Y
if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
if (EqPred == ICmpInst::ICMP_NE)
return getTrue(UnsignedICmp->getType());
return UnsignedICmp;
}
// X < Y && Y == 0 --> false
if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
IsAnd)
return getFalse(UnsignedICmp->getType());
return nullptr;
}
// Simplify (and (icmp ...) (icmp ...)) to false when we can tell that the
// range of possible values cannot be satisfied.
static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
ICmpInst::Predicate Pred0, Pred1;
ConstantInt *CI1, *CI2;
Value *V;
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_ConstantInt(CI1)),
m_ConstantInt(CI2))))
return nullptr;
if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Specific(CI1))))
return nullptr;
Type *ITy = Op0->getType();
auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
bool isNSW = AddInst->hasNoSignedWrap();
bool isNUW = AddInst->hasNoUnsignedWrap();
const APInt &CI1V = CI1->getValue();
const APInt &CI2V = CI2->getValue();
const APInt Delta = CI2V - CI1V;
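  // Delta = CI2 - CI1 relates the two compare constants; the cases below
  // enumerate (Pred0, Pred1) pairs whose value ranges for V are provably
  // disjoint, making the conjunction false.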
if (CI1V.isStrictlyPositive()) {
if (Delta == 2) {
if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
return getFalse(ITy);
if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
return getFalse(ITy);
}
if (Delta == 1) {
if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
return getFalse(ITy);
if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
return getFalse(ITy);
}
}
if (CI1V.getBoolValue() && isNUW) {
if (Delta == 2)
if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
return getFalse(ITy);
if (Delta == 1)
if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
return getFalse(ITy);
}
return nullptr;
}
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// X & undef -> 0
if (match(Op1, m_Undef()))
return Constant::getNullValue(Op0->getType());
// X & X = X
if (Op0 == Op1)
return Op0;
// X & 0 = 0
if (match(Op1, m_Zero()))
return Op1;
// X & -1 = X
if (match(Op1, m_AllOnes()))
return Op0;
// A & ~A = ~A & A = 0
if (match(Op0, m_Not(m_Specific(Op1))) ||
match(Op1, m_Not(m_Specific(Op0))))
return Constant::getNullValue(Op0->getType());
// (A | ?) & A = A
Value *A = nullptr, *B = nullptr;
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
// A & (A | ?) = A
if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0))
return Op0;
// A & (-A) = A if A is a power of two or zero.
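  // (e.g. 8 & -8 == 8 and 0 & 0 == 0, whereas 12 & -12 == 4)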
if (match(Op0, m_Neg(m_Specific(Op1))) ||
match(Op1, m_Neg(m_Specific(Op0)))) {
if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
Q.DT))
return Op0;
if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
Q.DT))
return Op1;
}
if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
return V;
if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
return V;
}
}
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
MaxRecurse))
return V;
// And distributes over Or. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or,
Q, MaxRecurse))
return V;
// And distributes over Xor. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor,
Q, MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyAndInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
// Simplify (or (icmp ...) (icmp ...)) to true when we can tell that the union
// contains all possible values.
static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
ICmpInst::Predicate Pred0, Pred1;
ConstantInt *CI1, *CI2;
Value *V;
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
return X;
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_ConstantInt(CI1)),
m_ConstantInt(CI2))))
return nullptr;
if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Specific(CI1))))
return nullptr;
Type *ITy = Op0->getType();
auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
bool isNSW = AddInst->hasNoSignedWrap();
bool isNUW = AddInst->hasNoUnsignedWrap();
const APInt &CI1V = CI1->getValue();
const APInt &CI2V = CI2->getValue();
const APInt Delta = CI2V - CI1V;
if (CI1V.isStrictlyPositive()) {
if (Delta == 2) {
if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
return getTrue(ITy);
if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
return getTrue(ITy);
}
if (Delta == 1) {
if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
return getTrue(ITy);
if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
return getTrue(ITy);
}
}
if (CI1V.getBoolValue() && isNUW) {
if (Delta == 2)
if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
return getTrue(ITy);
if (Delta == 1)
if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
return getTrue(ITy);
}
return nullptr;
}
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// X | undef -> -1
if (match(Op1, m_Undef()))
return Constant::getAllOnesValue(Op0->getType());
// X | X = X
if (Op0 == Op1)
return Op0;
// X | 0 = X
if (match(Op1, m_Zero()))
return Op0;
// X | -1 = -1
if (match(Op1, m_AllOnes()))
return Op1;
// A | ~A = ~A | A = -1
if (match(Op0, m_Not(m_Specific(Op1))) ||
match(Op1, m_Not(m_Specific(Op0))))
return Constant::getAllOnesValue(Op0->getType());
// (A & ?) | A = A
Value *A = nullptr, *B = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
// A | (A & ?) = A
if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0))
return Op0;
// ~(A & ?) | A = -1
if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) &&
(A == Op1 || B == Op1))
return Constant::getAllOnesValue(Op1->getType());
// A | ~(A & ?) = -1
if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) &&
(A == Op0 || B == Op0))
return Constant::getAllOnesValue(Op0->getType());
if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
return V;
if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
return V;
}
}
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
MaxRecurse))
return V;
// Or distributes over And. Try some generic simplifications based on this.
if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
MaxRecurse))
return V;
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
MaxRecurse))
return V;
// (A & C)|(B & D)
Value *C = nullptr, *D = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
match(Op1, m_And(m_Value(B), m_Value(D)))) {
ConstantInt *C1 = dyn_cast<ConstantInt>(C);
ConstantInt *C2 = dyn_cast<ConstantInt>(D);
if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
// (A & C1)|(B & C2)
// If we have: ((V + N) & C1) | (V & C2)
// .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
// replace with V+N.
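      // For example (an i8 sketch): with C1 = 0xF0, C2 = 0x0F, and N = 16,
      // ((V + 16) & 0xF0) | (V & 0x0F) == V + 16, because adding 16 cannot
      // change the low nibble when (16 & 0x0F) == 0.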
Value *V1, *V2;
if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
match(A, m_Add(m_Value(V1), m_Value(V2)))) {
// Add commutes, try both ways.
if (V1 == B &&
MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return A;
if (V2 == B &&
MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return A;
}
// Or commutes, try both ways.
if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
match(B, m_Add(m_Value(V1), m_Value(V2)))) {
// Add commutes, try both ways.
if (V1 == A &&
MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return B;
if (V2 == A &&
MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return B;
}
}
}
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyOrInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
unsigned MaxRecurse) {
if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
std::swap(Op0, Op1);
}
// A ^ undef -> undef
if (match(Op1, m_Undef()))
return Op1;
// A ^ 0 = A
if (match(Op1, m_Zero()))
return Op0;
// A ^ A = 0
if (Op0 == Op1)
return Constant::getNullValue(Op0->getType());
// A ^ ~A = ~A ^ A = -1
if (match(Op0, m_Not(m_Specific(Op1))) ||
match(Op1, m_Not(m_Specific(Op0))))
return Constant::getAllOnesValue(Op0->getType());
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
MaxRecurse))
return V;
// Threading Xor over selects and phi nodes is pointless, so don't bother.
// Threading over the select in "A ^ select(cond, B, C)" means evaluating
// "A^B" and "A^C" and seeing if they are equal; but they are equal if and
// only if B and C are equal. If B and C are equal then (since we assume
// that operands have already been simplified) "select(cond, B, C)" should
// have been simplified to the common value of B and C already. Analysing
// "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
return nullptr;
}
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyXorInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
static Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}
/// ExtractEquivalentCondition - Rummage around inside V looking for something
/// equivalent to the comparison "LHS Pred RHS". Return such a value if found,
/// otherwise return null. Helper function for analyzing max/min idioms.
static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
if (!SI)
return nullptr;
CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
if (!Cmp)
return nullptr;
Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
return Cmp;
if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
LHS == CmpRHS && RHS == CmpLHS)
return Cmp;
return nullptr;
}
// A significant optimization not implemented here is assuming that alloca
// addresses are not equal to incoming argument values. They don't *alias*,
// as we say, but that doesn't mean they aren't equal, so we take a
// conservative approach.
//
// This is inspired in part by C++11 5.10p1:
// "Two pointers of the same type compare equal if and only if they are both
// null, both point to the same function, or both represent the same
// address."
//
// This is pretty permissive.
//
// It's also partly due to C11 6.5.9p6:
// "Two pointers compare equal if and only if both are null pointers, both are
// pointers to the same object (including a pointer to an object and a
// subobject at its beginning) or function, both are pointers to one past the
// last element of the same array object, or one is a pointer to one past the
// end of one array object and the other is a pointer to the start of a
// different array object that happens to immediately follow the first array
// object in the address space."
//
// C11's version is more restrictive, however there's no reason why an argument
// couldn't be a one-past-the-end value for a stack object in the caller and be
// equal to the beginning of a stack object in the callee.
//
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
static Constant *computePointerICmp(const DataLayout &DL,
const TargetLibraryInfo *TLI,
CmpInst::Predicate Pred, Value *LHS,
Value *RHS) {
// First, skip past any trivial no-ops.
LHS = LHS->stripPointerCasts();
RHS = RHS->stripPointerCasts();
// A non-null pointer is not equal to a null pointer.
if (llvm::isKnownNonNull(LHS, TLI) && isa<ConstantPointerNull>(RHS) &&
(Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
// We can only fold certain predicates on pointer comparisons.
switch (Pred) {
default:
return nullptr;
  // Equality comparisons are easy to fold.
case CmpInst::ICMP_EQ:
case CmpInst::ICMP_NE:
break;
// We can only handle unsigned relational comparisons because 'inbounds' on
// a GEP only protects against unsigned wrapping.
case CmpInst::ICMP_UGT:
case CmpInst::ICMP_UGE:
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
// However, we have to switch them to their signed variants to handle
// negative indices from the base pointer.
Pred = ICmpInst::getSignedPredicate(Pred);
break;
}
// Strip off any constant offsets so that we can reason about them.
// It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
// here and compare base addresses like AliasAnalysis does, however there are
// numerous hazards. AliasAnalysis and its utilities rely on special rules
// governing loads and stores which don't apply to icmps. Also, AliasAnalysis
// doesn't need to guarantee pointer inequality when it says NoAlias.
Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
// If LHS and RHS are related via constant offsets to the same base
// value, we can replace it with an icmp which just compares the offsets.
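  // For example, two inbounds GEPs off the same base pointer with constant
  // offsets 4 and 8 fold "icmp eq" to false here.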
if (LHS == RHS)
return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
// Various optimizations for (in)equality comparisons.
if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
// Different non-empty allocations that exist at the same time have
// different addresses (if the program can tell). Global variables always
// exist, so they always exist during the lifetime of each other and all
// allocas. Two different allocas usually have different addresses...
//
// However, if there's an @llvm.stackrestore dynamically in between two
// allocas, they may have the same address. It's tempting to reduce the
// scope of the problem by only looking at *static* allocas here. That would
// cover the majority of allocas while significantly reducing the likelihood
// of having an @llvm.stackrestore pop up in the middle. However, it's not
// actually impossible for an @llvm.stackrestore to pop up in the middle of
// an entry block. Also, if we have a block that's not attached to a
// function, we can't tell if it's "static" under the current definition.
    // Theoretically, this problem could be fixed by creating a new kind of
    // instruction specifically for static allocas. Such a new instruction
// could be required to be at the top of the entry block, thus preventing it
// from being subject to a @llvm.stackrestore. Instcombine could even
// convert regular allocas into these special allocas. It'd be nifty.
// However, until then, this problem remains open.
//
// So, we'll assume that two non-empty allocas have different addresses
// for now.
//
// With all that, if the offsets are within the bounds of their allocations
// (and not one-past-the-end! so we can't use inbounds!), and their
// allocations aren't the same, the pointers are not equal.
//
// Note that it's not necessary to check for LHS being a global variable
// address, due to canonicalization and constant folding.
if (isa<AllocaInst>(LHS) &&
(isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
uint64_t LHSSize, RHSSize;
if (LHSOffsetCI && RHSOffsetCI &&
getObjectSize(LHS, LHSSize, DL, TLI) &&
getObjectSize(RHS, RHSSize, DL, TLI)) {
const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
if (!LHSOffsetValue.isNegative() &&
!RHSOffsetValue.isNegative() &&
LHSOffsetValue.ult(LHSSize) &&
RHSOffsetValue.ult(RHSSize)) {
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
}
}
// Repeat the above check but this time without depending on DataLayout
// or being able to compute a precise size.
if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
!cast<PointerType>(RHS->getType())->isEmptyTy() &&
LHSOffset->isNullValue() &&
RHSOffset->isNullValue())
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
}
    // Even if a non-inbounds GEP occurs along the path, we can still optimize
// equality comparisons concerning the result. We avoid walking the whole
// chain again by starting where the last calls to
// stripAndComputeConstantOffsets left off and accumulate the offsets.
Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
if (LHS == RHS)
return ConstantExpr::getICmp(Pred,
ConstantExpr::getAdd(LHSOffset, LHSNoBound),
ConstantExpr::getAdd(RHSOffset, RHSNoBound));
// If one side of the equality comparison must come from a noalias call
// (meaning a system memory allocation function), and the other side must
// come from a pointer that cannot overlap with dynamically-allocated
// memory within the lifetime of the current function (allocas, byval
// arguments, globals), then determine the comparison result here.
SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
GetUnderlyingObjects(LHS, LHSUObjs, DL);
GetUnderlyingObjects(RHS, RHSUObjs, DL);
// Is the set of underlying objects all noalias calls?
auto IsNAC = [](SmallVectorImpl<Value *> &Objects) {
return std::all_of(Objects.begin(), Objects.end(),
[](Value *V){ return isNoAliasCall(V); });
};
// Is the set of underlying objects all things which must be disjoint from
// noalias calls. For allocas, we consider only static ones (dynamic
    // allocas might be transformed into calls to malloc, which need not be
    // simultaneously live with the compared-to allocation). For globals, we
    // exclude symbols that might be resolved lazily to symbols in another
    // dynamically-loaded library (and, thus, could be malloc'ed by the
    // implementation).
auto IsAllocDisjoint = [](SmallVectorImpl<Value *> &Objects) {
return std::all_of(Objects.begin(), Objects.end(),
[](Value *V){
if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
return AI->getParent() && AI->getParent()->getParent() &&
AI->isStaticAlloca();
if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
return (GV->hasLocalLinkage() ||
GV->hasHiddenVisibility() ||
GV->hasProtectedVisibility() ||
GV->hasUnnamedAddr()) &&
!GV->isThreadLocal();
if (const Argument *A = dyn_cast<Argument>(V))
return A->hasByValAttr();
return false;
});
};
if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
(IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
}
// Otherwise, fail.
return nullptr;
}
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
Pred = CmpInst::getSwappedPredicate(Pred);
}
Type *ITy = GetCompareTy(LHS); // The return type.
Type *OpTy = LHS->getType(); // The operand type.
// icmp X, X -> true/false
// X icmp undef -> true/false. For example, icmp ugt %X, undef -> false
// because X could be 0.
if (LHS == RHS || isa<UndefValue>(RHS))
return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
// Special case logic when the operands have i1 type.
if (OpTy->getScalarType()->isIntegerTy(1)) {
switch (Pred) {
default: break;
case ICmpInst::ICMP_EQ:
// X == 1 -> X
if (match(RHS, m_One()))
return LHS;
break;
case ICmpInst::ICMP_NE:
// X != 0 -> X
if (match(RHS, m_Zero()))
return LHS;
break;
case ICmpInst::ICMP_UGT:
// X >u 0 -> X
if (match(RHS, m_Zero()))
return LHS;
break;
case ICmpInst::ICMP_UGE:
// X >=u 1 -> X
if (match(RHS, m_One()))
return LHS;
break;
case ICmpInst::ICMP_SLT:
// X <s 0 -> X
if (match(RHS, m_Zero()))
return LHS;
break;
case ICmpInst::ICMP_SLE:
// X <=s -1 -> X
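      // (for i1, the constant 1 is -1 when interpreted as signed)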
if (match(RHS, m_One()))
return LHS;
break;
}
}
// If we are comparing with zero then try hard since this is a common case.
if (match(RHS, m_Zero())) {
bool LHSKnownNonNegative, LHSKnownNegative;
switch (Pred) {
default: llvm_unreachable("Unknown ICmp predicate!");
case ICmpInst::ICMP_ULT:
return getFalse(ITy);
case ICmpInst::ICMP_UGE:
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative)
return getFalse(ITy);
break;
case ICmpInst::ICMP_SLE:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative &&
isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getFalse(ITy);
break;
case ICmpInst::ICMP_SGE:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative)
return getTrue(ITy);
break;
case ICmpInst::ICMP_SGT:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative &&
isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
return getTrue(ITy);
break;
}
}
// See if we are doing a comparison with a constant integer.
if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    // Rule out tautological comparisons (e.g., ult 0 or uge 0).
ConstantRange RHS_CR = ICmpInst::makeConstantRange(Pred, CI->getValue());
if (RHS_CR.isEmptySet())
return ConstantInt::getFalse(CI->getContext());
if (RHS_CR.isFullSet())
return ConstantInt::getTrue(CI->getContext());
// Many binary operators with constant RHS have easy to compute constant
// range. Use them to check whether the comparison is a tautology.
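    // For example, "icmp ult (urem i32 %x, 10), 11" is always true: urem
    // produces values in [0, 10), which the predicate's range [0, 11)
    // contains.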
unsigned Width = CI->getBitWidth();
APInt Lower = APInt(Width, 0);
APInt Upper = APInt(Width, 0);
ConstantInt *CI2;
if (match(LHS, m_URem(m_Value(), m_ConstantInt(CI2)))) {
// 'urem x, CI2' produces [0, CI2).
Upper = CI2->getValue();
} else if (match(LHS, m_SRem(m_Value(), m_ConstantInt(CI2)))) {
// 'srem x, CI2' produces (-|CI2|, |CI2|).
Upper = CI2->getValue().abs();
Lower = (-Upper) + 1;
} else if (match(LHS, m_UDiv(m_ConstantInt(CI2), m_Value()))) {
// 'udiv CI2, x' produces [0, CI2].
Upper = CI2->getValue() + 1;
} else if (match(LHS, m_UDiv(m_Value(), m_ConstantInt(CI2)))) {
// 'udiv x, CI2' produces [0, UINT_MAX / CI2].
APInt NegOne = APInt::getAllOnesValue(Width);
if (!CI2->isZero())
Upper = NegOne.udiv(CI2->getValue()) + 1;
} else if (match(LHS, m_SDiv(m_ConstantInt(CI2), m_Value()))) {
if (CI2->isMinSignedValue()) {
// 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
Lower = CI2->getValue();
Upper = Lower.lshr(1) + 1;
} else {
// 'sdiv CI2, x' produces [-|CI2|, |CI2|].
Upper = CI2->getValue().abs() + 1;
Lower = (-Upper) + 1;
}
} else if (match(LHS, m_SDiv(m_Value(), m_ConstantInt(CI2)))) {
APInt IntMin = APInt::getSignedMinValue(Width);
APInt IntMax = APInt::getSignedMaxValue(Width);
APInt Val = CI2->getValue();
if (Val.isAllOnesValue()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX], because
        // 'sdiv INT_MIN, -1' is undefined and thus excluded from the range.
Lower = IntMin + 1;
Upper = IntMax + 1;
} else if (Val.countLeadingZeros() < Width - 1) {
// 'sdiv x, CI2' produces [INT_MIN / CI2, INT_MAX / CI2]
// where CI2 != -1 and CI2 != 0 and CI2 != 1
Lower = IntMin.sdiv(Val);
Upper = IntMax.sdiv(Val);
if (Lower.sgt(Upper))
std::swap(Lower, Upper);
Upper = Upper + 1;
assert(Upper != Lower && "Upper part of range has wrapped!");
}
} else if (match(LHS, m_NUWShl(m_ConstantInt(CI2), m_Value()))) {
// 'shl nuw CI2, x' produces [CI2, CI2 << CLZ(CI2)]
Lower = CI2->getValue();
Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
} else if (match(LHS, m_NSWShl(m_ConstantInt(CI2), m_Value()))) {
if (CI2->isNegative()) {
// 'shl nsw CI2, x' produces [CI2 << CLO(CI2)-1, CI2]
unsigned ShiftAmount = CI2->getValue().countLeadingOnes() - 1;
Lower = CI2->getValue().shl(ShiftAmount);
Upper = CI2->getValue() + 1;
} else {
// 'shl nsw CI2, x' produces [CI2, CI2 << CLZ(CI2)-1]
unsigned ShiftAmount = CI2->getValue().countLeadingZeros() - 1;
Lower = CI2->getValue();
Upper = CI2->getValue().shl(ShiftAmount) + 1;
}
} else if (match(LHS, m_LShr(m_Value(), m_ConstantInt(CI2)))) {
// 'lshr x, CI2' produces [0, UINT_MAX >> CI2].
APInt NegOne = APInt::getAllOnesValue(Width);
if (CI2->getValue().ult(Width))
Upper = NegOne.lshr(CI2->getValue()) + 1;
} else if (match(LHS, m_LShr(m_ConstantInt(CI2), m_Value()))) {
// 'lshr CI2, x' produces [CI2 >> (Width-1), CI2].
unsigned ShiftAmount = Width - 1;
if (!CI2->isZero() && cast<BinaryOperator>(LHS)->isExact())
ShiftAmount = CI2->getValue().countTrailingZeros();
Lower = CI2->getValue().lshr(ShiftAmount);
Upper = CI2->getValue() + 1;
} else if (match(LHS, m_AShr(m_Value(), m_ConstantInt(CI2)))) {
// 'ashr x, CI2' produces [INT_MIN >> CI2, INT_MAX >> CI2].
APInt IntMin = APInt::getSignedMinValue(Width);
APInt IntMax = APInt::getSignedMaxValue(Width);
if (CI2->getValue().ult(Width)) {
Lower = IntMin.ashr(CI2->getValue());
Upper = IntMax.ashr(CI2->getValue()) + 1;
}
} else if (match(LHS, m_AShr(m_ConstantInt(CI2), m_Value()))) {
unsigned ShiftAmount = Width - 1;
if (!CI2->isZero() && cast<BinaryOperator>(LHS)->isExact())
ShiftAmount = CI2->getValue().countTrailingZeros();
if (CI2->isNegative()) {
// 'ashr CI2, x' produces [CI2, CI2 >> (Width-1)]
Lower = CI2->getValue();
Upper = CI2->getValue().ashr(ShiftAmount) + 1;
} else {
// 'ashr CI2, x' produces [CI2 >> (Width-1), CI2]
Lower = CI2->getValue().ashr(ShiftAmount);
Upper = CI2->getValue() + 1;
}
} else if (match(LHS, m_Or(m_Value(), m_ConstantInt(CI2)))) {
// 'or x, CI2' produces [CI2, UINT_MAX].
Lower = CI2->getValue();
} else if (match(LHS, m_And(m_Value(), m_ConstantInt(CI2)))) {
// 'and x, CI2' produces [0, CI2].
Upper = CI2->getValue() + 1;
}
if (Lower != Upper) {
ConstantRange LHS_CR = ConstantRange(Lower, Upper);
if (RHS_CR.contains(LHS_CR))
return ConstantInt::getTrue(RHS->getContext());
if (RHS_CR.inverse().contains(LHS_CR))
return ConstantInt::getFalse(RHS->getContext());
}
}
// Compare of cast, for example (zext X) != 0 -> X != 0
if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
Instruction *LI = cast<CastInst>(LHS);
Value *SrcOp = LI->getOperand(0);
Type *SrcTy = SrcOp->getType();
Type *DstTy = LI->getType();
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
if (MaxRecurse && isa<PtrToIntInst>(LI) &&
Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
ConstantExpr::getIntToPtr(RHSC, SrcTy),
Q, MaxRecurse-1))
return V;
} else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
if (RI->getOperand(0)->getType() == SrcTy)
// Compare without the cast.
if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
Q, MaxRecurse-1))
return V;
}
}
if (isa<ZExtInst>(LHS)) {
// Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
// same type.
if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
// Compare X and Y. Note that signed predicates become unsigned.
if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
SrcOp, RI->getOperand(0), Q,
MaxRecurse-1))
return V;
}
// Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
// too. If not, then try to deduce the result of the comparison.
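      // For example, "icmp ult (zext i8 %x to i32), 256" is always true:
      // the zext result fits in [0, 255], and 256 does not survive the
      // trunc/reextend round trip below.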
else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
// Compute the constant that would happen if we truncated to SrcTy then
// reextended to DstTy.
Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
// If the re-extended constant didn't change then this is effectively
// also a case of comparing two zero-extended values.
if (RExt == CI && MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
SrcOp, Trunc, Q, MaxRecurse-1))
return V;
// Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
// there. Use this to work out the result of the comparison.
if (RExt != CI) {
switch (Pred) {
default: llvm_unreachable("Unknown ICmp predicate!");
// LHS <u RHS.
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
return ConstantInt::getFalse(CI->getContext());
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
return ConstantInt::getTrue(CI->getContext());
        // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
        // is non-negative then LHS <s RHS.
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
return CI->getValue().isNegative() ?
ConstantInt::getTrue(CI->getContext()) :
ConstantInt::getFalse(CI->getContext());
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
return CI->getValue().isNegative() ?
ConstantInt::getFalse(CI->getContext()) :
ConstantInt::getTrue(CI->getContext());
}
}
}
}
if (isa<SExtInst>(LHS)) {
// Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
// same type.
if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
// Compare X and Y. Note that the predicate does not change.
if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
Q, MaxRecurse-1))
return V;
}
// Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
// too. If not, then try to deduce the result of the comparison.
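      // For example, "icmp slt (sext i8 %x to i32), 128" is always true:
      // the sext result lies in [-128, 127], and 128 does not survive the
      // trunc/reextend round trip below.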
else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
// Compute the constant that would happen if we truncated to SrcTy then
// reextended to DstTy.
Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
// If the re-extended constant didn't change then this is effectively
// also a case of comparing two sign-extended values.
if (RExt == CI && MaxRecurse)
if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
return V;
// Otherwise the upper bits of LHS are all equal, while RHS has varying
// bits there. Use this to work out the result of the comparison.
if (RExt != CI) {
switch (Pred) {
default: llvm_unreachable("Unknown ICmp predicate!");
case ICmpInst::ICMP_EQ:
return ConstantInt::getFalse(CI->getContext());
case ICmpInst::ICMP_NE:
return ConstantInt::getTrue(CI->getContext());
// If RHS is non-negative then LHS <s RHS. If RHS is negative then
// LHS >s RHS.
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
return CI->getValue().isNegative() ?
ConstantInt::getTrue(CI->getContext()) :
ConstantInt::getFalse(CI->getContext());
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
return CI->getValue().isNegative() ?
ConstantInt::getFalse(CI->getContext()) :
ConstantInt::getTrue(CI->getContext());
// If LHS is non-negative then LHS <u RHS. If LHS is negative then
// LHS >u RHS.
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
// Comparison is true iff the LHS <s 0.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
Constant::getNullValue(SrcTy),
Q, MaxRecurse-1))
return V;
break;
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
// Comparison is true iff the LHS >=s 0.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
Constant::getNullValue(SrcTy),
Q, MaxRecurse-1))
return V;
break;
}
}
}
}
}
// Special logic for binary operators.
BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
if (MaxRecurse && (LBO || RBO)) {
// Analyze the case when either LHS or RHS is an add instruction.
Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
// LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
if (LBO && LBO->getOpcode() == Instruction::Add) {
A = LBO->getOperand(0); B = LBO->getOperand(1);
NoLHSWrapProblem = ICmpInst::isEquality(Pred) ||
(CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
(CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
}
if (RBO && RBO->getOpcode() == Instruction::Add) {
C = RBO->getOperand(0); D = RBO->getOperand(1);
NoRHSWrapProblem = ICmpInst::isEquality(Pred) ||
(CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
(CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
}
// icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
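    // For example, with nsw, "icmp slt (add nsw %x, %y), %x" holds iff
    // "%y <s 0".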
if ((A == RHS || B == RHS) && NoLHSWrapProblem)
if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
Constant::getNullValue(RHS->getType()),
Q, MaxRecurse-1))
return V;
// icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
if ((C == LHS || D == LHS) && NoRHSWrapProblem)
if (Value *V = SimplifyICmpInst(Pred,
Constant::getNullValue(LHS->getType()),
C == LHS ? D : C, Q, MaxRecurse-1))
return V;
// icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
if (A && C && (A == C || A == D || B == C || B == D) &&
NoLHSWrapProblem && NoRHSWrapProblem) {
// Determine Y and Z in the form icmp (X+Y), (X+Z).
Value *Y, *Z;
if (A == C) {
// C + B == C + D -> B == D
Y = B;
Z = D;
} else if (A == D) {
// D + B == C + D -> B == C
Y = B;
Z = C;
} else if (B == C) {
// A + C == C + D -> A == D
Y = A;
Z = D;
} else {
assert(B == D);
// A + D == C + D -> A == C
Y = A;
Z = C;
}
if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1))
return V;
}
}
// icmp pred (or X, Y), X
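  // (an 'or' can only set bits, so (or X, Y) >=u X always holds)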
if (LBO && match(LBO, m_CombineOr(m_Or(m_Value(), m_Specific(RHS)),
m_Or(m_Specific(RHS), m_Value())))) {
if (Pred == ICmpInst::ICMP_ULT)
return getFalse(ITy);
if (Pred == ICmpInst::ICMP_UGE)
return getTrue(ITy);
}
// icmp pred X, (or X, Y)
if (RBO && match(RBO, m_CombineOr(m_Or(m_Value(), m_Specific(LHS)),
m_Or(m_Specific(LHS), m_Value())))) {
if (Pred == ICmpInst::ICMP_ULE)
return getTrue(ITy);
if (Pred == ICmpInst::ICMP_UGT)
return getFalse(ITy);
}
// icmp pred (and X, Y), X
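  // (an 'and' can only clear bits, so (and X, Y) <=u X always holds)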
if (LBO && match(LBO, m_CombineOr(m_And(m_Value(), m_Specific(RHS)),
m_And(m_Specific(RHS), m_Value())))) {
if (Pred == ICmpInst::ICMP_UGT)
return getFalse(ITy);
if (Pred == ICmpInst::ICMP_ULE)
return getTrue(ITy);
}
// icmp pred X, (and X, Y)
if (RBO && match(RBO, m_CombineOr(m_And(m_Value(), m_Specific(LHS)),
m_And(m_Specific(LHS), m_Value())))) {
if (Pred == ICmpInst::ICMP_UGE)
return getTrue(ITy);
if (Pred == ICmpInst::ICMP_ULT)
return getFalse(ITy);
}
// 0 - (zext X) pred C
if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
if (RHSC->getValue().isStrictlyPositive()) {
if (Pred == ICmpInst::ICMP_SLT)
return ConstantInt::getTrue(RHSC->getContext());
if (Pred == ICmpInst::ICMP_SGE)
return ConstantInt::getFalse(RHSC->getContext());
if (Pred == ICmpInst::ICMP_EQ)
return ConstantInt::getFalse(RHSC->getContext());
if (Pred == ICmpInst::ICMP_NE)
return ConstantInt::getTrue(RHSC->getContext());
}
if (RHSC->getValue().isNonNegative()) {
if (Pred == ICmpInst::ICMP_SLE)
return ConstantInt::getTrue(RHSC->getContext());
if (Pred == ICmpInst::ICMP_SGT)
return ConstantInt::getFalse(RHSC->getContext());
}
}
}
// icmp pred (urem X, Y), Y
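  // (urem X, Y produces a value <u Y whenever Y is nonzero, and urem by
  // zero is undefined, so Y may be assumed nonzero)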
if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
bool KnownNonNegative, KnownNegative;
switch (Pred) {
default:
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH; // HLSL Change
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
return getFalse(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH; // HLSL Change
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
return getTrue(ITy);
}
}
// icmp pred X, (urem Y, X)
if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
bool KnownNonNegative, KnownNegative;
switch (Pred) {
default:
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH; // HLSL Change
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
return getTrue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC,
Q.CxtI, Q.DT);
if (!KnownNonNegative)
break;
LLVM_FALLTHROUGH; // HLSL Change
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
return getFalse(ITy);
}
}
// x udiv y <=u x.
if (LBO && match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
// icmp pred (X /u Y), X
if (Pred == ICmpInst::ICMP_UGT)
return getFalse(ITy);
if (Pred == ICmpInst::ICMP_ULE)
return getTrue(ITy);
}
// handle:
// CI2 << X == CI
// CI2 << X != CI
//
// where CI2 is a power of 2 and CI isn't
if (auto *CI = dyn_cast<ConstantInt>(RHS)) {
const APInt *CI2Val, *CIVal = &CI->getValue();
if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) &&
CI2Val->isPowerOf2()) {
if (!CIVal->isPowerOf2()) {
// CI2 << X can equal zero in some circumstances,
// this simplification is unsafe if CI is zero.
//
// We know it is safe if:
// - The shift is nsw, we can't shift out the one bit.
// - The shift is nuw, we can't shift out the one bit.
// - CI2 is one
// - CI isn't zero
if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
*CI2Val == 1 || !CI->isZero()) {
if (Pred == ICmpInst::ICMP_EQ)
return ConstantInt::getFalse(RHS->getContext());
if (Pred == ICmpInst::ICMP_NE)
return ConstantInt::getTrue(RHS->getContext());
}
}
if (CIVal->isSignBit() && *CI2Val == 1) {
if (Pred == ICmpInst::ICMP_UGT)
return ConstantInt::getFalse(RHS->getContext());
if (Pred == ICmpInst::ICMP_ULE)
return ConstantInt::getTrue(RHS->getContext());
}
}
}
if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
LBO->getOperand(1) == RBO->getOperand(1)) {
switch (LBO->getOpcode()) {
default: break;
case Instruction::UDiv:
case Instruction::LShr:
if (ICmpInst::isSigned(Pred))
break;
LLVM_FALLTHROUGH; // HLSL Change
case Instruction::SDiv:
case Instruction::AShr:
if (!LBO->isExact() || !RBO->isExact())
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
RBO->getOperand(0), Q, MaxRecurse-1))
return V;
break;
case Instruction::Shl: {
bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
if (!NUW && !NSW)
break;
if (!NSW && ICmpInst::isSigned(Pred))
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
RBO->getOperand(0), Q, MaxRecurse-1))
return V;
break;
}
}
}
// Simplify comparisons involving max/min.
Value *A, *B;
CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
// Signed variants on "max(a,b)>=a -> true".
if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // smax(A, B) pred A.
EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
// We analyze this as smax(A, B) pred A.
P = Pred;
} else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred smax(A, B).
EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
// We analyze this as smax(A, B) swapped-pred A.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
(A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // smin(A, B) pred A.
EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
// We analyze this as smax(-A, -B) swapped-pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred smin(A, B).
EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
// We analyze this as smax(-A, -B) pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = Pred;
}
if (P != CmpInst::BAD_ICMP_PREDICATE) {
// Cases correspond to "max(A, B) p A".
switch (P) {
default:
break;
case CmpInst::ICMP_EQ:
case CmpInst::ICMP_SLE:
// Equivalent to "A EqP B". This may be the same as the condition tested
// in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
case CmpInst::ICMP_SGT: {
CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
// Equivalent to "A InvEqP B". This may be the same as the condition
// tested in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse-1))
return V;
break;
}
case CmpInst::ICMP_SGE:
// Always true.
return getTrue(ITy);
case CmpInst::ICMP_SLT:
// Always false.
return getFalse(ITy);
}
}
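  // For example, in the notation of the comments above:
  //   (smax(A, B) sge A)  -->  true
  //   (smax(A, B) slt A)  -->  false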
// Unsigned variants on "max(a,b)>=a -> true".
P = CmpInst::BAD_ICMP_PREDICATE;
if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // umax(A, B) pred A.
EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
// We analyze this as umax(A, B) pred A.
P = Pred;
} else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred umax(A, B).
EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
// We analyze this as umax(A, B) swapped-pred A.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
(A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // umin(A, B) pred A.
EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
// We analyze this as umax(-A, -B) swapped-pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred umin(A, B).
EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
// We analyze this as umax(-A, -B) pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = Pred;
}
if (P != CmpInst::BAD_ICMP_PREDICATE) {
// Cases correspond to "max(A, B) p A".
switch (P) {
default:
break;
case CmpInst::ICMP_EQ:
case CmpInst::ICMP_ULE:
// Equivalent to "A EqP B". This may be the same as the condition tested
// in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
case CmpInst::ICMP_UGT: {
CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
// Equivalent to "A InvEqP B". This may be the same as the condition
// tested in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse-1))
return V;
break;
}
case CmpInst::ICMP_UGE:
// Always true.
return getTrue(ITy);
case CmpInst::ICMP_ULT:
// Always false.
return getFalse(ITy);
}
}
// Variants on "max(x,y) >= min(x,z)".
Value *C, *D;
if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// max(x, ?) pred min(x, ?).
if (Pred == CmpInst::ICMP_SGE)
// Always true.
return getTrue(ITy);
if (Pred == CmpInst::ICMP_SLT)
// Always false.
return getFalse(ITy);
} else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// min(x, ?) pred max(x, ?).
if (Pred == CmpInst::ICMP_SLE)
// Always true.
return getTrue(ITy);
if (Pred == CmpInst::ICMP_SGT)
// Always false.
return getFalse(ITy);
} else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// max(x, ?) pred min(x, ?).
if (Pred == CmpInst::ICMP_UGE)
// Always true.
return getTrue(ITy);
if (Pred == CmpInst::ICMP_ULT)
// Always false.
return getFalse(ITy);
} else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// min(x, ?) pred max(x, ?).
if (Pred == CmpInst::ICMP_ULE)
// Always true.
return getTrue(ITy);
if (Pred == CmpInst::ICMP_UGT)
// Always false.
return getFalse(ITy);
}
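  // For example, smax(%x, %y) sge smin(%x, %z) always holds when the two
  // share the operand %x: smax(%x, %y) >= %x >= smin(%x, %z).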
// Simplify comparisons of related pointers using a powerful, recursive
// GEP-walk when we have target data available..
if (LHS->getType()->isPointerTy())
if (Constant *C = computePointerICmp(Q.DL, Q.TLI, Pred, LHS, RHS))
return C;
if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
(ICmpInst::isEquality(Pred) ||
(GLHS->isInBounds() && GRHS->isInBounds() &&
Pred == ICmpInst::getSignedPredicate(Pred)))) {
// The bases are equal and the indices are constant. Build a constant
// expression GEP with the same indices and a null base pointer to see
// what constant folding can make out of it.
Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
Constant *NewLHS = ConstantExpr::getGetElementPtr(
GLHS->getSourceElementType(), Null, IndicesLHS);
SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
Constant *NewRHS = ConstantExpr::getGetElementPtr(
GLHS->getSourceElementType(), Null, IndicesRHS);
return ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
}
}
}
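  // For example (a sketch; %p is an arbitrary shared base):
  //   icmp eq (gep i32, i32* %p, i64 1), (gep i32, i32* %p, i64 2)
  // is rebuilt as a compare of the same GEPs over a null base, which
  // constant folding can then resolve (here, to false).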
// If a bit is known to be zero for A and known to be one for B,
// then A and B cannot be equal.
if (ICmpInst::isEquality(Pred)) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
uint32_t BitWidth = CI->getBitWidth();
APInt LHSKnownZero(BitWidth, 0);
APInt LHSKnownOne(BitWidth, 0);
computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, /*Depth=*/0, Q.AC,
Q.CxtI, Q.DT);
const APInt &RHSVal = CI->getValue();
if (((LHSKnownZero & RHSVal) != 0) || ((LHSKnownOne & ~RHSVal) != 0))
return Pred == ICmpInst::ICMP_EQ
? ConstantInt::getFalse(CI->getContext())
: ConstantInt::getTrue(CI->getContext());
}
}
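  // For example, if %x is known to have its low bit clear (say it was
  // produced by "shl i32 %y, 1"), then "icmp eq i32 %x, 5" folds to false.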
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
return V;
// If the comparison is with the result of a phi instruction, check whether
// doing the compare with each incoming phi value yields a common result.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
Instruction *CxtI) {
return ::SimplifyICmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const Query &Q,
unsigned MaxRecurse) {
CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
Pred = CmpInst::getSwappedPredicate(Pred);
}
// Fold trivial predicates.
if (Pred == FCmpInst::FCMP_FALSE)
return ConstantInt::get(GetCompareTy(LHS), 0);
if (Pred == FCmpInst::FCMP_TRUE)
return ConstantInt::get(GetCompareTy(LHS), 1);
// UNO/ORD predicates can be trivially folded if NaNs are ignored.
if (FMF.noNaNs()) {
if (Pred == FCmpInst::FCMP_UNO)
return ConstantInt::get(GetCompareTy(LHS), 0);
if (Pred == FCmpInst::FCMP_ORD)
return ConstantInt::get(GetCompareTy(LHS), 1);
}
// fcmp pred x, undef and fcmp pred undef, x
// fold to true if unordered, false if ordered
if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
// Choosing NaN for the undef will always make unordered comparison succeed
// and ordered comparison fail.
return ConstantInt::get(GetCompareTy(LHS), CmpInst::isUnordered(Pred));
}
// fcmp x,x -> true/false. Not all compares are foldable.
if (LHS == RHS) {
if (CmpInst::isTrueWhenEqual(Pred))
return ConstantInt::get(GetCompareTy(LHS), 1);
if (CmpInst::isFalseWhenEqual(Pred))
return ConstantInt::get(GetCompareTy(LHS), 0);
}
// Handle fcmp with constant RHS
if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
// If the constant is a nan, see if we can fold the comparison based on it.
if (CFP->getValueAPF().isNaN()) {
if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
return ConstantInt::getFalse(CFP->getContext());
assert(FCmpInst::isUnordered(Pred) &&
"Comparison must be either ordered or unordered!");
// True if unordered.
return ConstantInt::getTrue(CFP->getContext());
}
// Check whether the constant is an infinity.
if (CFP->getValueAPF().isInfinity()) {
if (CFP->getValueAPF().isNegative()) {
switch (Pred) {
case FCmpInst::FCMP_OLT:
// No value is ordered and less than negative infinity.
return ConstantInt::getFalse(CFP->getContext());
case FCmpInst::FCMP_UGE:
          // All values are either unordered with, or at least, negative infinity.
return ConstantInt::getTrue(CFP->getContext());
default:
break;
}
} else {
switch (Pred) {
case FCmpInst::FCMP_OGT:
// No value is ordered and greater than infinity.
return ConstantInt::getFalse(CFP->getContext());
case FCmpInst::FCMP_ULE:
          // All values are either unordered with, or at most, infinity.
return ConstantInt::getTrue(CFP->getContext());
default:
break;
}
}
}
if (CFP->getValueAPF().isZero()) {
switch (Pred) {
case FCmpInst::FCMP_UGE:
if (CannotBeOrderedLessThanZero(LHS))
return ConstantInt::getTrue(CFP->getContext());
break;
case FCmpInst::FCMP_OLT:
// X < 0
if (CannotBeOrderedLessThanZero(LHS))
return ConstantInt::getFalse(CFP->getContext());
break;
default:
break;
}
}
}
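  // A couple of concrete instances of the zero-compare case above (pseudo-
  // notation; %x is an arbitrary value):
  //   fcmp uge (fabs(%x)), 0.0  -->  true
  //   fcmp olt (fabs(%x)), 0.0  -->  false
  // (a fabs result can never be ordered and less than zero).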
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
return V;
// If the comparison is with the result of a phi instruction, check whether
// doing the compare with each incoming phi value yields a common result.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
return nullptr;
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF,
Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
/// replaced with RepOp.
static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
const Query &Q,
unsigned MaxRecurse) {
// Trivial replacement.
if (V == Op)
return RepOp;
auto *I = dyn_cast<Instruction>(V);
if (!I)
return nullptr;
// If this is a binary operator, try to simplify it with the replaced op.
if (auto *B = dyn_cast<BinaryOperator>(I)) {
// Consider:
// %cmp = icmp eq i32 %x, 2147483647
// %add = add nsw i32 %x, 1
// %sel = select i1 %cmp, i32 -2147483648, i32 %add
//
// We can't replace %sel with %add unless we strip away the flags.
if (isa<OverflowingBinaryOperator>(B))
if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
return nullptr;
if (isa<PossiblyExactOperator>(B))
if (B->isExact())
return nullptr;
if (MaxRecurse) {
if (B->getOperand(0) == Op)
return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
MaxRecurse - 1);
if (B->getOperand(1) == Op)
return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
MaxRecurse - 1);
}
}
// Same for CmpInsts.
if (CmpInst *C = dyn_cast<CmpInst>(I)) {
if (MaxRecurse) {
if (C->getOperand(0) == Op)
return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
MaxRecurse - 1);
if (C->getOperand(1) == Op)
return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
MaxRecurse - 1);
}
}
// TODO: We could hand off more cases to instsimplify here.
// If all operands are constant after substituting Op for RepOp then we can
// constant fold the instruction.
if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
// Build a list of all constant operands.
SmallVector<Constant *, 8> ConstOps;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
if (I->getOperand(i) == Op)
ConstOps.push_back(CRepOp);
else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
ConstOps.push_back(COp);
else
break;
}
// All operands were constants, fold it.
if (ConstOps.size() == I->getNumOperands()) {
if (CmpInst *C = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
ConstOps[1], Q.DL, Q.TLI);
if (LoadInst *LI = dyn_cast<LoadInst>(I))
if (!LI->isVolatile())
return ConstantFoldLoadFromConstPtr(ConstOps[0], Q.DL);
return ConstantFoldInstOperands(I->getOpcode(), I->getType(), ConstOps,
Q.DL, Q.TLI);
}
}
return nullptr;
}
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
Value *FalseVal, const Query &Q,
unsigned MaxRecurse) {
// select true, X, Y -> X
// select false, X, Y -> Y
if (Constant *CB = dyn_cast<Constant>(CondVal)) {
if (CB->isAllOnesValue())
return TrueVal;
if (CB->isNullValue())
return FalseVal;
}
// select C, X, X -> X
if (TrueVal == FalseVal)
return TrueVal;
if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
if (isa<Constant>(TrueVal))
return TrueVal;
return FalseVal;
}
if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
return FalseVal;
if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
return TrueVal;
if (const auto *ICI = dyn_cast<ICmpInst>(CondVal)) {
unsigned BitWidth = Q.DL.getTypeSizeInBits(TrueVal->getType());
ICmpInst::Predicate Pred = ICI->getPredicate();
Value *CmpLHS = ICI->getOperand(0);
Value *CmpRHS = ICI->getOperand(1);
APInt MinSignedValue = APInt::getSignBit(BitWidth);
Value *X;
const APInt *Y;
bool TrueWhenUnset;
bool IsBitTest = false;
if (ICmpInst::isEquality(Pred) &&
match(CmpLHS, m_And(m_Value(X), m_APInt(Y))) &&
match(CmpRHS, m_Zero())) {
IsBitTest = true;
TrueWhenUnset = Pred == ICmpInst::ICMP_EQ;
} else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
X = CmpLHS;
Y = &MinSignedValue;
IsBitTest = true;
TrueWhenUnset = false;
} else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
X = CmpLHS;
Y = &MinSignedValue;
IsBitTest = true;
TrueWhenUnset = true;
}
if (IsBitTest) {
const APInt *C;
// (X & Y) == 0 ? X & ~Y : X --> X
// (X & Y) != 0 ? X & ~Y : X --> X & ~Y
if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
*Y == ~*C)
return TrueWhenUnset ? FalseVal : TrueVal;
// (X & Y) == 0 ? X : X & ~Y --> X & ~Y
// (X & Y) != 0 ? X : X & ~Y --> X
if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
*Y == ~*C)
return TrueWhenUnset ? FalseVal : TrueVal;
if (Y->isPowerOf2()) {
// (X & Y) == 0 ? X | Y : X --> X | Y
// (X & Y) != 0 ? X | Y : X --> X
if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
*Y == *C)
return TrueWhenUnset ? TrueVal : FalseVal;
// (X & Y) == 0 ? X : X | Y --> X
// (X & Y) != 0 ? X : X | Y --> X | Y
if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
*Y == *C)
return TrueWhenUnset ? TrueVal : FalseVal;
}
}
if (ICI->hasOneUse()) {
const APInt *C;
if (match(CmpRHS, m_APInt(C))) {
// X < MIN ? T : F --> F
if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue())
return FalseVal;
// X < MIN ? T : F --> F
if (Pred == ICmpInst::ICMP_ULT && C->isMinValue())
return FalseVal;
// X > MAX ? T : F --> F
if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue())
return FalseVal;
// X > MAX ? T : F --> F
if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue())
return FalseVal;
}
}
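    // For instance, the block above turns
    //   select (icmp ult i32 %x, 0), i32 %t, i32 %f
    // into %f, since no unsigned value is less than the minimum (zero).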
// If we have an equality comparison then we know the value in one of the
// arms of the select. See if substituting this value into the arm and
// simplifying the result yields the same value as the other arm.
if (Pred == ICmpInst::ICMP_EQ) {
if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
TrueVal ||
SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
TrueVal)
return FalseVal;
if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
FalseVal ||
SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
FalseVal)
return FalseVal;
} else if (Pred == ICmpInst::ICMP_NE) {
if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
FalseVal ||
SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
FalseVal)
return TrueVal;
if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
TrueVal ||
SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
TrueVal)
return TrueVal;
}
}
return nullptr;
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal,
Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// SimplifyGEPInst - Given operands for a GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const Query &Q, unsigned) {
// The type of the GEP pointer operand.
unsigned AS =
cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
// getelementptr P -> P.
if (Ops.size() == 1)
return Ops[0];
// Compute the (pointer) type returned by the GEP instruction.
Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
Type *GEPTy = PointerType::get(LastType, AS);
if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
GEPTy = VectorType::get(GEPTy, VT->getNumElements());
if (isa<UndefValue>(Ops[0]))
return UndefValue::get(GEPTy);
if (Ops.size() == 2) {
// getelementptr P, 0 -> P.
if (match(Ops[1], m_Zero()))
return Ops[0];
Type *Ty = SrcTy;
if (Ty->isSized()) {
Value *P;
uint64_t C;
uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
// getelementptr P, N -> P if P points to a type of zero size.
if (TyAllocSize == 0)
return Ops[0];
// The following transforms are only safe if the ptrtoint cast
// doesn't truncate the pointers.
if (Ops[1]->getType()->getScalarSizeInBits() ==
Q.DL.getPointerSizeInBits(AS)) {
auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
if (match(P, m_Zero()))
return Constant::getNullValue(GEPTy);
Value *Temp;
if (match(P, m_PtrToInt(m_Value(Temp))))
if (Temp->getType() == GEPTy)
return Temp;
return nullptr;
};
// getelementptr V, (sub P, V) -> P if P points to a type of size 1.
if (TyAllocSize == 1 &&
match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
if (Value *R = PtrToIntOrZero(P))
return R;
// getelementptr V, (ashr (sub P, V), C) -> Q
// if P points to a type of size 1 << C.
if (match(Ops[1],
m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
m_ConstantInt(C))) &&
TyAllocSize == 1ULL << C)
if (Value *R = PtrToIntOrZero(P))
return R;
// getelementptr V, (sdiv (sub P, V), C) -> Q
// if P points to a type of size C.
if (match(Ops[1],
m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
m_SpecificInt(TyAllocSize))))
if (Value *R = PtrToIntOrZero(P))
return R;
}
}
}
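  // For example, in the pseudo-notation of the comments above, with i8* %p
  // and i8* %v (size-1 element type):
  //   getelementptr i8, i8* %v, (sub (ptrtoint %p), (ptrtoint %v))  -->  %p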
// Check to see if this is constant foldable.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (!isa<Constant>(Ops[i]))
return nullptr;
return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
Ops.slice(1));
}
Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyGEPInst(
cast<PointerType>(Ops[0]->getType()->getScalarType())->getElementType(),
Ops, Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs, const Query &Q,
unsigned) {
if (Constant *CAgg = dyn_cast<Constant>(Agg))
if (Constant *CVal = dyn_cast<Constant>(Val))
return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
// insertvalue x, undef, n -> x
if (match(Val, m_Undef()))
return Agg;
// insertvalue x, (extractvalue y, n), n
if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
if (EV->getAggregateOperand()->getType() == Agg->getType() &&
EV->getIndices() == Idxs) {
// insertvalue undef, (extractvalue y, n), n -> y
if (match(Agg, m_Undef()))
return EV->getAggregateOperand();
// insertvalue y, (extractvalue y, n), n -> y
if (Agg == EV->getAggregateOperand())
return Agg;
}
return nullptr;
}
Value *llvm::SimplifyInsertValueInst(
Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyExtractValueInst - Given operands for an ExtractValueInst, see if we
/// can fold the result. If not, this returns null.
static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const Query &, unsigned) {
if (auto *CAgg = dyn_cast<Constant>(Agg))
return ConstantFoldExtractValueInstruction(CAgg, Idxs);
  // extractvalue (insertvalue y, elt, n), n -> elt
unsigned NumIdxs = Idxs.size();
for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
unsigned NumInsertValueIdxs = InsertValueIdxs.size();
unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
Idxs.slice(0, NumCommonIdxs)) {
if (NumIdxs == NumInsertValueIdxs)
return IVI->getInsertedValueOperand();
break;
}
}
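  // For example, the walk above handles chains such as
  //   extractvalue (insertvalue (insertvalue %agg, %a, 0), %b, 1), 0
  // by stepping past the outer insertvalue (indices 1 vs. 0 differ) and
  // yielding %a.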
return nullptr;
}
Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyExtractElementInst - Given operands for an ExtractElementInst, see if we
/// can fold the result. If not, this returns null.
static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &,
unsigned) {
if (auto *CVec = dyn_cast<Constant>(Vec)) {
if (auto *CIdx = dyn_cast<Constant>(Idx))
return ConstantFoldExtractElementInstruction(CVec, CIdx);
// The index is not relevant if our vector is a splat.
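    // e.g. extractelement <4 x i32> <i32 7, i32 7, i32 7, i32 7>, i32 %i --> 7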
if (auto *Splat = CVec->getSplatValue())
return Splat;
if (isa<UndefValue>(Vec))
return UndefValue::get(Vec->getType()->getVectorElementType());
}
// If extracting a specified index from the vector, see if we can recursively
// find a previously computed scalar that was inserted into the vector.
if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
return Elt;
return nullptr;
}
Value *llvm::SimplifyExtractElementInst(
Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) {
return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyPHINode - See if we can fold the given phi. If not, returns null.
static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
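  // For instance, "phi i32 [ %x, %bb0 ], [ %x, %bb1 ]" is simply %x.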
Value *CommonValue = nullptr;
bool HasUndefInput = false;
for (Value *Incoming : PN->incoming_values()) {
// If the incoming value is the phi node itself, it can safely be skipped.
if (Incoming == PN) continue;
if (isa<UndefValue>(Incoming)) {
// Remember that we saw an undef value, but otherwise ignore them.
HasUndefInput = true;
continue;
}
if (CommonValue && Incoming != CommonValue)
return nullptr; // Not the same, bail out.
CommonValue = Incoming;
}
// If CommonValue is null then all of the incoming values were either undef or
// equal to the phi node itself.
if (!CommonValue)
return UndefValue::get(PN->getType());
// If we have a PHI node like phi(X, undef, X), where X is defined by some
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
if (HasUndefInput)
return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
return CommonValue;
}
static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
if (Constant *C = dyn_cast<Constant>(Op))
return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);
return nullptr;
}
Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyTruncInst(Op, Ty, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
//=== Helper functions for higher up the class hierarchy.
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
switch (Opcode) {
case Instruction::Add:
return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
Q, MaxRecurse);
case Instruction::FAdd:
return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::Sub:
return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
Q, MaxRecurse);
case Instruction::FSub:
return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::Mul: return SimplifyMulInst (LHS, RHS, Q, MaxRecurse);
case Instruction::FMul:
return SimplifyFMulInst (LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
case Instruction::FDiv:
return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::SRem: return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
case Instruction::URem: return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
case Instruction::FRem:
return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::Shl:
return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
Q, MaxRecurse);
case Instruction::LShr:
return SimplifyLShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
case Instruction::AShr:
return SimplifyAShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse);
case Instruction::And: return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
case Instruction::Or: return SimplifyOrInst (LHS, RHS, Q, MaxRecurse);
case Instruction::Xor: return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
default:
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.DL,
Q.TLI);
}
// If the operation is associative, try some generic simplifications.
if (Instruction::isAssociative(Opcode))
if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
// If the operation is with the result of a select instruction check whether
// operating on either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
// If the operation is with the result of a phi instruction, check whether
// operating on all incoming values of the phi always yields the same value.
if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
return nullptr;
}
}
/// SimplifyFPBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
/// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
/// result. In cases where we don't need FastMathFlags, simply fall back to
/// SimplifyBinOp.
static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF, const Query &Q,
unsigned MaxRecurse) {
switch (Opcode) {
case Instruction::FAdd:
return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
case Instruction::FSub:
return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
case Instruction::FMul:
return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
default:
return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
}
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
static bool IsIdempotent(Intrinsic::ID ID) {
switch (ID) {
default: return false;
// Unary idempotent: f(f(x)) = f(x)
case Intrinsic::fabs:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::trunc:
case Intrinsic::rint:
case Intrinsic::nearbyint:
case Intrinsic::round:
return true;
}
}
template <typename IterTy>
static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
const Query &Q, unsigned MaxRecurse) {
Intrinsic::ID IID = F->getIntrinsicID();
unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
Type *ReturnType = F->getReturnType();
// Binary Ops
if (NumOperands == 2) {
Value *LHS = *ArgBegin;
Value *RHS = *(ArgBegin + 1);
if (IID == Intrinsic::usub_with_overflow ||
IID == Intrinsic::ssub_with_overflow) {
// X - X -> { 0, false }
if (LHS == RHS)
return Constant::getNullValue(ReturnType);
// X - undef -> undef
// undef - X -> undef
if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
return UndefValue::get(ReturnType);
}
if (IID == Intrinsic::uadd_with_overflow ||
IID == Intrinsic::sadd_with_overflow) {
// X + undef -> undef
if (isa<UndefValue>(RHS))
return UndefValue::get(ReturnType);
}
if (IID == Intrinsic::umul_with_overflow ||
IID == Intrinsic::smul_with_overflow) {
// X * 0 -> { 0, false }
if (match(RHS, m_Zero()))
return Constant::getNullValue(ReturnType);
// X * undef -> { 0, false }
if (match(RHS, m_Undef()))
return Constant::getNullValue(ReturnType);
}
}
// Perform idempotent optimizations
if (!IsIdempotent(IID))
return nullptr;
// Unary Ops
if (NumOperands == 1)
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin))
if (II->getIntrinsicID() == IID)
return II;
return nullptr;
}
template <typename IterTy>
static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
const Query &Q, unsigned MaxRecurse) {
Type *Ty = V->getType();
if (PointerType *PTy = dyn_cast<PointerType>(Ty))
Ty = PTy->getElementType();
FunctionType *FTy = cast<FunctionType>(Ty);
// call undef -> undef
if (isa<UndefValue>(V))
return UndefValue::get(FTy->getReturnType());
Function *F = dyn_cast<Function>(V);
if (!F)
return nullptr;
if (F->isIntrinsic())
if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse))
return Ret;
if (!canConstantFoldCallTo(F))
return nullptr;
SmallVector<Constant *, 4> ConstantArgs;
ConstantArgs.reserve(ArgEnd - ArgBegin);
for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
Constant *C = dyn_cast<Constant>(*I);
if (!C)
return nullptr;
ConstantArgs.push_back(C);
}
return ConstantFoldCall(F, ConstantArgs, Q.TLI);
}
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT,
AssumptionCache *AC, const Instruction *CxtI) {
return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCall(V, Args.begin(), Args.end(),
Query(DL, TLI, DT, AC, CxtI), RecursionLimit);
}
// HLSL Change - Begin
// Copied CastInst simplification from LLVM 8
static
Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
Constant *C, Type *SrcEltTy,
unsigned NumSrcElts,
const DataLayout &DL) {
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
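  // e.g. bitcasting <2 x i16> <i16 0x1234, i16 0x5678> to i32 yields
  // 0x56781234 on a little-endian target (element 0 lands in the low bits).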
unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
for (unsigned i = 0; i != NumSrcElts; ++i) {
Constant *Element;
if (DL.isLittleEndian())
Element = C->getAggregateElement(NumSrcElts - i - 1);
else
Element = C->getAggregateElement(i);
if (Element && isa<UndefValue>(Element)) {
Result <<= BitShift;
continue;
}
auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
if (!ElementCI)
return ConstantExpr::getBitCast(C, DestTy);
Result <<= BitShift;
Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
}
return nullptr;
}
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
// Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
!DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
return Constant::getAllOnesValue(DestTy);
if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
// Handle a vector->scalar integer/fp cast.
if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
unsigned NumSrcElts = VTy->getNumElements();
Type *SrcEltTy = VTy->getElementType();
      // If the vector is a vector of floating point values, convert it to a
      // vector of integers to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
// Ask IR to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
}
APInt Result(DL.getTypeSizeInBits(DestTy), 0);
if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
SrcEltTy, NumSrcElts, DL))
return CE;
if (isa<IntegerType>(DestTy))
return ConstantInt::get(DestTy, Result);
APFloat FP(DestTy->getFltSemantics(), Result);
return ConstantFP::get(DestTy->getContext(), FP);
}
}
// The code below only handles casts to vectors currently.
auto *DestVTy = dyn_cast<VectorType>(DestTy);
if (!DestVTy)
return ConstantExpr::getBitCast(C, DestTy);
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
}
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
// If the element types match, IR can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  // First things first. We only want to think about integers here, so if
  // we have something in FP form, recast it as an integer.
if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
Type *DestIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, DL);
// Finally, IR can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask IR to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
// If IR wasn't able to fold it, bail out.
if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
!isa<ConstantDataVector>(C))
return C;
}
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = DL.isLittleEndian();
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
Constant *Zero = Constant::getNullValue(DstEltTy);
unsigned Ratio = NumSrcElt/NumDstElt;
unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
unsigned SrcElt = 0;
for (unsigned i = 0; i != NumDstElt; ++i) {
// Build each element of the result.
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
Constant *Src = C->getAggregateElement(SrcElt++);
if (Src && isa<UndefValue>(Src))
Src = Constant::getNullValue(C->getType()->getVectorElementType());
else
Src = dyn_cast_or_null<ConstantInt>(Src);
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
// Shift it to the right place, depending on endianness.
Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
Result.push_back(Elt);
}
return ConstantVector::get(Result);
}
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
auto *Element = C->getAggregateElement(i);
if (!Element) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
Result.append(Ratio, UndefValue::get(DstEltTy));
continue;
}
auto *Src = dyn_cast<ConstantInt>(Element);
if (!Src)
return ConstantExpr::getBitCast(C, DestTy);
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
if (DstEltTy->isPointerTy()) {
IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
continue;
}
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
return ConstantVector::get(Result);
}
static
Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C,
Type *DestTy, const DataLayout &DL) {
assert(Instruction::isCast(Opcode));
switch (Opcode) {
default:
llvm_unreachable("Missing case");
case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
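    // For example, "ptrtoint (inttoptr (i64 C to i8*) to i64)" folds back to
    // C when pointers are 64 bits wide; narrower pointers mask off the high
    // bits first.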
if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
if (PtrWidth < InWidth) {
Constant *Mask =
ConstantInt::get(CE->getContext(),
APInt::getLowBitsSet(InWidth, PtrWidth));
Input = ConstantExpr::getAnd(Input, Mask);
}
// Do a zext or trunc to get to the dest size.
return ConstantExpr::getIntegerCast(Input, DestTy, false);
}
}
return ConstantExpr::getCast(Opcode, C, DestTy);
case Instruction::IntToPtr:
// If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
// the int size is >= the ptr size and the address spaces are the same.
// This requires knowing the width of a pointer, so it can't be done in
// ConstantExpr::getCast.
if (auto *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::PtrToInt) {
Constant *SrcPtr = CE->getOperand(0);
unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
if (MidIntSize >= SrcPtrSize) {
unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
if (SrcAS == DestTy->getPointerAddressSpace())
return FoldBitCast(CE->getOperand(0), DestTy, DL);
}
}
}
return ConstantExpr::getCast(Opcode, C, DestTy);
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::AddrSpaceCast:
return ConstantExpr::getCast(Opcode, C, DestTy);
case Instruction::BitCast:
return FoldBitCast(C, DestTy, DL);
}
}
static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
Type *Ty, const DataLayout &DL) {
if (auto *C = dyn_cast<Constant>(Op))
return ConstantFoldCastOperand(CastOpc, C, Ty, DL);
if (auto *CI = dyn_cast<CastInst>(Op)) {
auto *Src = CI->getOperand(0);
Type *SrcTy = Src->getType();
Type *MidTy = CI->getType();
Type *DstTy = Ty;
if (Src->getType() == Ty) {
auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
Type *SrcIntPtrTy =
SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
Type *MidIntPtrTy =
MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
Type *DstIntPtrTy =
DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
SrcIntPtrTy, MidIntPtrTy,
DstIntPtrTy) == Instruction::BitCast)
return Src;
}
}
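  // For example, casting "bitcast <4 x float> %v to <4 x i32>" back to
  // <4 x float> folds to %v, since the cast pair is eliminable.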
// bitcast x -> x
if (CastOpc == Instruction::BitCast)
if (Op->getType() == Ty)
return Op;
return nullptr;
}
Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op,
Type *Ty, const DataLayout &DL) {
return ::SimplifyCastInst(CastOpc, Op, Ty, DL);
}
// HLSL Change - End
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC) {
Value *Result;
switch (I->getOpcode()) {
default:
Result = ConstantFoldInstruction(I, DL, TLI);
break;
case Instruction::FAdd:
Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
TLI, DT, AC, I);
break;
case Instruction::FSub:
Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
TLI, DT, AC, I);
break;
case Instruction::FMul:
Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Mul:
Result =
SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::SDiv:
Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
AC, I);
break;
case Instruction::UDiv:
Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
AC, I);
break;
case Instruction::FDiv:
Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::SRem:
Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
AC, I);
break;
case Instruction::URem:
Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
AC, I);
break;
case Instruction::FRem:
Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL,
TLI, DT, AC, I);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
AC, I);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(), DL, TLI, DT,
AC, I);
break;
case Instruction::And:
Result =
SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::Or:
Result =
SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::Xor:
Result =
SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::ICmp:
Result =
SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), I->getOperand(0),
I->getOperand(1), DL, TLI, DT, AC, I);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
I->getOperand(2), DL, TLI, DT, AC, I);
break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
Result = SimplifyGEPInst(Ops, DL, TLI, DT, AC, I);
break;
}
case Instruction::InsertValue: {
InsertValueInst *IV = cast<InsertValueInst>(I);
Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
IV->getInsertedValueOperand(),
IV->getIndices(), DL, TLI, DT, AC, I);
break;
}
case Instruction::ExtractValue: {
auto *EVI = cast<ExtractValueInst>(I);
Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
EVI->getIndices(), DL, TLI, DT, AC, I);
break;
}
case Instruction::ExtractElement: {
auto *EEI = cast<ExtractElementInst>(I);
Result = SimplifyExtractElementInst(
EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC, I);
break;
}
case Instruction::PHI:
Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, AC, I));
break;
case Instruction::Call: {
CallSite CS(cast<CallInst>(I));
// HLSL Change Begin - simplify dxil calls.
if (Function *Callee = CS.getCalledFunction()) {
if (hlsl::CanSimplify(Callee)) {
SmallVector<Value *, 4> Args(CS.arg_begin(), CS.arg_end());
if (Value *DxilResult = hlsl::SimplifyDxilCall(CS.getCalledFunction(), Args, I, /* MayInsert */ true)) {
Result = DxilResult;
break;
}
}
}
// HLSL Change End.
Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), DL,
TLI, DT, AC, I);
break;
}
case Instruction::Trunc:
Result =
SimplifyTruncInst(I->getOperand(0), I->getType(), DL, TLI, DT, AC, I);
break;
}
/// If called on unreachable code, the above logic may report that the
/// instruction simplified to itself. Make life easier for users by
/// detecting that case here, returning a safe value instead.
return Result == I ? UndefValue::get(I->getType()) : Result;
}
/// \brief Implementation of recursive simplification through an instructions
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
/// InstructionSimplify.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
bool Simplified = false;
SmallSetVector<Instruction *, 8> Worklist;
const DataLayout &DL = I->getModule()->getDataLayout();
// If we have an explicit value to collapse to, do that round of the
// simplification loop by hand initially.
if (SimpleV) {
for (User *U : I->users())
if (U != I)
Worklist.insert(cast<Instruction>(U));
// Replace the instruction with its simplified value.
I->replaceAllUsesWith(SimpleV);
// Gracefully handle edge cases where the instruction is not wired into any
// parent block.
if (I->getParent())
I->eraseFromParent();
} else {
Worklist.insert(I);
}
// Note that we must test the size on each iteration, the worklist can grow.
for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
I = Worklist[Idx];
// See if this instruction simplifies.
SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC);
if (!SimpleV)
continue;
Simplified = true;
    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the replacement value on the recursive step in most cases.
for (User *U : I->users())
Worklist.insert(cast<Instruction>(U));
// Replace the instruction with its simplified value.
I->replaceAllUsesWith(SimpleV);
// Gracefully handle edge cases where the instruction is not wired into any
// parent block.
if (I->getParent())
I->eraseFromParent();
}
return Simplified;
}
bool llvm::recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/DomPrinter.cpp | //===- DomPrinter.cpp - DOT printer for the dominance trees ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines '-dot-dom' and '-dot-postdom' analysis passes, which emit
// a dom.<fnname>.dot or postdom.<fnname>.dot file for each function in the
// program, with a graph of the dominance/postdominance tree of that
// function.
//
// There are also passes available to directly call dotty ('-view-dom' or
// '-view-postdom'). By appending '-only', as in '-dot-dom-only', only the
// names of the basic blocks are printed and their contents are hidden.
//
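// A typical invocation (the input file name is illustrative):
//   opt -dot-dom input.bc -disable-output
// writes one dom.<fnname>.dot file per function in the module.
//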
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DomPrinter.h"
#include "llvm/Analysis/DOTGraphTraitsPass.h"
#include "llvm/Analysis/PostDominators.h"
using namespace llvm;
namespace llvm {
template<>
struct DOTGraphTraits<DomTreeNode*> : public DefaultDOTGraphTraits {
DOTGraphTraits (bool isSimple=false)
: DefaultDOTGraphTraits(isSimple) {}
std::string getNodeLabel(DomTreeNode *Node, DomTreeNode *Graph) {
BasicBlock *BB = Node->getBlock();
if (!BB)
return "Post dominance root node";
if (isSimple())
return DOTGraphTraits<const Function*>
::getSimpleNodeLabel(BB, BB->getParent());
else
return DOTGraphTraits<const Function*>
::getCompleteNodeLabel(BB, BB->getParent());
}
};
template<>
struct DOTGraphTraits<DominatorTree*> : public DOTGraphTraits<DomTreeNode*> {
DOTGraphTraits (bool isSimple=false)
: DOTGraphTraits<DomTreeNode*>(isSimple) {}
static std::string getGraphName(DominatorTree *DT) {
return "Dominator tree";
}
std::string getNodeLabel(DomTreeNode *Node, DominatorTree *G) {
return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode());
}
};
template<>
struct DOTGraphTraits<PostDominatorTree*>
: public DOTGraphTraits<DomTreeNode*> {
DOTGraphTraits (bool isSimple=false)
: DOTGraphTraits<DomTreeNode*>(isSimple) {}
static std::string getGraphName(PostDominatorTree *DT) {
return "Post dominator tree";
}
std::string getNodeLabel(DomTreeNode *Node, PostDominatorTree *G ) {
return DOTGraphTraits<DomTreeNode*>::getNodeLabel(Node, G->getRootNode());
}
};
}
namespace {
struct DominatorTreeWrapperPassAnalysisGraphTraits {
static DominatorTree *getGraph(DominatorTreeWrapperPass *DTWP) {
return &DTWP->getDomTree();
}
};
struct DomViewer : public DOTGraphTraitsViewer<
DominatorTreeWrapperPass, false, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
DomViewer()
: DOTGraphTraitsViewer<DominatorTreeWrapperPass, false, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits>(
"dom", ID) {
initializeDomViewerPass(*PassRegistry::getPassRegistry());
}
};
struct DomOnlyViewer : public DOTGraphTraitsViewer<
DominatorTreeWrapperPass, true, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
DomOnlyViewer()
: DOTGraphTraitsViewer<DominatorTreeWrapperPass, true, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits>(
"domonly", ID) {
initializeDomOnlyViewerPass(*PassRegistry::getPassRegistry());
}
};
struct PostDomViewer
: public DOTGraphTraitsViewer<PostDominatorTree, false> {
static char ID;
PostDomViewer() :
DOTGraphTraitsViewer<PostDominatorTree, false>("postdom", ID){
initializePostDomViewerPass(*PassRegistry::getPassRegistry());
}
};
struct PostDomOnlyViewer
: public DOTGraphTraitsViewer<PostDominatorTree, true> {
static char ID;
PostDomOnlyViewer() :
DOTGraphTraitsViewer<PostDominatorTree, true>("postdomonly", ID){
initializePostDomOnlyViewerPass(*PassRegistry::getPassRegistry());
}
};
} // end anonymous namespace
char DomViewer::ID = 0;
INITIALIZE_PASS(DomViewer, "view-dom",
"View dominance tree of function", false, false)
char DomOnlyViewer::ID = 0;
INITIALIZE_PASS(DomOnlyViewer, "view-dom-only",
"View dominance tree of function (with no function bodies)",
false, false)
char PostDomViewer::ID = 0;
INITIALIZE_PASS(PostDomViewer, "view-postdom",
"View postdominance tree of function", false, false)
char PostDomOnlyViewer::ID = 0;
INITIALIZE_PASS(PostDomOnlyViewer, "view-postdom-only",
"View postdominance tree of function "
"(with no function bodies)",
false, false)
namespace {
struct DomPrinter : public DOTGraphTraitsPrinter<
DominatorTreeWrapperPass, false, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
DomPrinter()
: DOTGraphTraitsPrinter<DominatorTreeWrapperPass, false, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits>(
"dom", ID) {
initializeDomPrinterPass(*PassRegistry::getPassRegistry());
}
};
struct DomOnlyPrinter : public DOTGraphTraitsPrinter<
DominatorTreeWrapperPass, true, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits> {
static char ID;
DomOnlyPrinter()
: DOTGraphTraitsPrinter<DominatorTreeWrapperPass, true, DominatorTree *,
DominatorTreeWrapperPassAnalysisGraphTraits>(
"domonly", ID) {
initializeDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
}
};
struct PostDomPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, false> {
static char ID;
PostDomPrinter() :
DOTGraphTraitsPrinter<PostDominatorTree, false>("postdom", ID) {
initializePostDomPrinterPass(*PassRegistry::getPassRegistry());
}
};
struct PostDomOnlyPrinter
: public DOTGraphTraitsPrinter<PostDominatorTree, true> {
static char ID;
PostDomOnlyPrinter() :
DOTGraphTraitsPrinter<PostDominatorTree, true>("postdomonly", ID) {
initializePostDomOnlyPrinterPass(*PassRegistry::getPassRegistry());
}
};
} // end anonymous namespace
char DomPrinter::ID = 0;
INITIALIZE_PASS(DomPrinter, "dot-dom",
"Print dominance tree of function to 'dot' file",
false, false)
char DomOnlyPrinter::ID = 0;
INITIALIZE_PASS(DomOnlyPrinter, "dot-dom-only",
"Print dominance tree of function to 'dot' file "
"(with no function bodies)",
false, false)
char PostDomPrinter::ID = 0;
INITIALIZE_PASS(PostDomPrinter, "dot-postdom",
"Print postdominance tree of function to 'dot' file",
false, false)
char PostDomOnlyPrinter::ID = 0;
INITIALIZE_PASS(PostDomOnlyPrinter, "dot-postdom-only",
"Print postdominance tree of function to 'dot' file "
"(with no function bodies)",
false, false)
// Create methods available outside of this file so they can be referenced
// from "include/llvm/LinkAllPasses.h". Otherwise the passes would be deleted
// by link-time optimization.
FunctionPass *llvm::createDomPrinterPass() {
return new DomPrinter();
}
FunctionPass *llvm::createDomOnlyPrinterPass() {
return new DomOnlyPrinter();
}
FunctionPass *llvm::createDomViewerPass() {
return new DomViewer();
}
FunctionPass *llvm::createDomOnlyViewerPass() {
return new DomOnlyViewer();
}
FunctionPass *llvm::createPostDomPrinterPass() {
return new PostDomPrinter();
}
FunctionPass *llvm::createPostDomOnlyPrinterPass() {
return new PostDomOnlyPrinter();
}
FunctionPass *llvm::createPostDomViewerPass() {
return new PostDomViewer();
}
FunctionPass *llvm::createPostDomOnlyViewerPass() {
return new PostDomOnlyViewer();
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/IntervalPartition.cpp | //===- IntervalPartition.cpp - Interval Partition module code -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the definition of the IntervalPartition class, which
// calculates and represents the interval partition of a function.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/IntervalIterator.h"
using namespace llvm;
char IntervalPartition::ID = 0;
INITIALIZE_PASS(IntervalPartition, "intervals",
"Interval Partition Construction", true, true)
//===----------------------------------------------------------------------===//
// IntervalPartition Implementation
//===----------------------------------------------------------------------===//
// releaseMemory - Reset state back to before function was analyzed
void IntervalPartition::releaseMemory() {
for (unsigned i = 0, e = Intervals.size(); i != e; ++i)
delete Intervals[i];
IntervalMap.clear();
Intervals.clear();
RootInterval = nullptr;
}
void IntervalPartition::print(raw_ostream &O, const Module*) const {
for(unsigned i = 0, e = Intervals.size(); i != e; ++i)
Intervals[i]->print(O);
}
// addIntervalToPartition - Add an interval to the internal list of intervals,
// and then add mappings from all of the basic blocks in the interval to the
// interval itself (in the IntervalMap).
//
void IntervalPartition::addIntervalToPartition(Interval *I) {
Intervals.push_back(I);
// Add mappings for all of the basic blocks in I to the IntervalPartition
for (Interval::node_iterator It = I->Nodes.begin(), End = I->Nodes.end();
It != End; ++It)
IntervalMap.insert(std::make_pair(*It, I));
}
// updatePredecessors - Interval generation only sets the successor fields of
// the interval data structures. After interval generation is complete,
// run through all of the intervals and propagate successor info as
// predecessor info.
//
void IntervalPartition::updatePredecessors(Interval *Int) {
BasicBlock *Header = Int->getHeaderNode();
for (Interval::succ_iterator I = Int->Successors.begin(),
E = Int->Successors.end(); I != E; ++I)
getBlockInterval(*I)->Predecessors.push_back(Header);
}
// IntervalPartition ctor - Build the first level interval partition for the
// specified function...
//
bool IntervalPartition::runOnFunction(Function &F) {
  // Pass false to intervals_begin because we take ownership of its memory
function_interval_iterator I = intervals_begin(&F, false);
assert(I != intervals_end(&F) && "No intervals in function!?!?!");
addIntervalToPartition(RootInterval = *I);
++I; // After the first one...
// Add the rest of the intervals to the partition.
for (function_interval_iterator E = intervals_end(&F); I != E; ++I)
addIntervalToPartition(*I);
// Now that we know all of the successor information, propagate this to the
// predecessors for each block.
for (unsigned i = 0, e = Intervals.size(); i != e; ++i)
updatePredecessors(Intervals[i]);
return false;
}
// IntervalPartition ctor - Build a reduced interval partition from an
// existing interval graph. This takes an additional boolean parameter to
// distinguish it from a copy constructor. Always pass in false for now.
//
IntervalPartition::IntervalPartition(IntervalPartition &IP, bool)
: FunctionPass(ID) {
assert(IP.getRootInterval() && "Cannot operate on empty IntervalPartitions!");
  // Pass false to intervals_begin because we take ownership of its memory
interval_part_interval_iterator I = intervals_begin(IP, false);
assert(I != intervals_end(IP) && "No intervals in interval partition!?!?!");
addIntervalToPartition(RootInterval = *I);
++I; // After the first one...
// Add the rest of the intervals to the partition.
for (interval_part_interval_iterator E = intervals_end(IP); I != E; ++I)
addIntervalToPartition(*I);
// Now that we know all of the successor information, propagate this to the
// predecessors for each block.
for (unsigned i = 0, e = Intervals.size(); i != e; ++i)
updatePredecessors(Intervals[i]);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/TargetTransformInfo.cpp | //===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;
#define DEBUG_TYPE "tti"
namespace {
/// \brief No-op implementation of the TTI interface using the utility base
/// classes.
///
/// This is used when no target specific information is available.
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> {
explicit NoTTIImpl(const DataLayout &DL)
: TargetTransformInfoImplCRTPBase<NoTTIImpl>(DL) {}
};
}
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
: TTIImpl(new Model<NoTTIImpl>(NoTTIImpl(DL))) {}
TargetTransformInfo::~TargetTransformInfo() {}
TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
: TTIImpl(std::move(Arg.TTIImpl)) {}
TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) {
TTIImpl = std::move(RHS.TTIImpl);
return *this;
}
unsigned TargetTransformInfo::getOperationCost(unsigned Opcode, Type *Ty,
Type *OpTy) const {
return TTIImpl->getOperationCost(Opcode, Ty, OpTy);
}
unsigned TargetTransformInfo::getCallCost(FunctionType *FTy,
int NumArgs) const {
return TTIImpl->getCallCost(FTy, NumArgs);
}
unsigned
TargetTransformInfo::getCallCost(const Function *F,
ArrayRef<const Value *> Arguments) const {
return TTIImpl->getCallCost(F, Arguments);
}
unsigned
TargetTransformInfo::getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) const {
return TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);
}
unsigned TargetTransformInfo::getUserCost(const User *U) const {
return TTIImpl->getUserCost(U);
}
bool TargetTransformInfo::hasBranchDivergence() const {
return TTIImpl->hasBranchDivergence();
}
bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
return TTIImpl->isSourceOfDivergence(V);
}
bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
return TTIImpl->isLoweredToCall(F);
}
void TargetTransformInfo::getUnrollingPreferences(
Loop *L, UnrollingPreferences &UP) const {
return TTIImpl->getUnrollingPreferences(L, UP);
}
bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
return TTIImpl->isLegalAddImmediate(Imm);
}
bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
return TTIImpl->isLegalICmpImmediate(Imm);
}
bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset,
bool HasBaseReg,
int64_t Scale,
unsigned AddrSpace) const {
return TTIImpl->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
}
bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
int Consecutive) const {
return TTIImpl->isLegalMaskedStore(DataType, Consecutive);
}
bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
int Consecutive) const {
return TTIImpl->isLegalMaskedLoad(DataType, Consecutive);
}
int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset,
bool HasBaseReg,
int64_t Scale,
unsigned AddrSpace) const {
return TTIImpl->getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
Scale, AddrSpace);
}
bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
return TTIImpl->isTruncateFree(Ty1, Ty2);
}
bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const {
return TTIImpl->isProfitableToHoist(I);
}
bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
return TTIImpl->isTypeLegal(Ty);
}
unsigned TargetTransformInfo::getJumpBufAlignment() const {
return TTIImpl->getJumpBufAlignment();
}
unsigned TargetTransformInfo::getJumpBufSize() const {
return TTIImpl->getJumpBufSize();
}
bool TargetTransformInfo::shouldBuildLookupTables() const {
return TTIImpl->shouldBuildLookupTables();
}
bool TargetTransformInfo::enableAggressiveInterleaving(bool LoopHasReductions) const {
return TTIImpl->enableAggressiveInterleaving(LoopHasReductions);
}
TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
return TTIImpl->getPopcntSupport(IntTyWidthInBit);
}
bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
return TTIImpl->haveFastSqrt(Ty);
}
unsigned TargetTransformInfo::getFPOpCost(Type *Ty) const {
return TTIImpl->getFPOpCost(Ty);
}
unsigned TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty) const {
return TTIImpl->getIntImmCost(Imm, Ty);
}
unsigned TargetTransformInfo::getIntImmCost(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty) const {
return TTIImpl->getIntImmCost(Opcode, Idx, Imm, Ty);
}
unsigned TargetTransformInfo::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty) const {
return TTIImpl->getIntImmCost(IID, Idx, Imm, Ty);
}
unsigned TargetTransformInfo::getNumberOfRegisters(bool Vector) const {
return TTIImpl->getNumberOfRegisters(Vector);
}
unsigned TargetTransformInfo::getRegisterBitWidth(bool Vector) const {
return TTIImpl->getRegisterBitWidth(Vector);
}
unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
return TTIImpl->getMaxInterleaveFactor(VF);
}
unsigned TargetTransformInfo::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) const {
return TTIImpl->getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
}
unsigned TargetTransformInfo::getShuffleCost(ShuffleKind Kind, Type *Ty,
int Index, Type *SubTp) const {
return TTIImpl->getShuffleCost(Kind, Ty, Index, SubTp);
}
unsigned TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src) const {
return TTIImpl->getCastInstrCost(Opcode, Dst, Src);
}
unsigned TargetTransformInfo::getCFInstrCost(unsigned Opcode) const {
return TTIImpl->getCFInstrCost(Opcode);
}
unsigned TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy) const {
return TTIImpl->getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
unsigned Index) const {
return TTIImpl->getVectorInstrCost(Opcode, Val, Index);
}
unsigned TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) const {
return TTIImpl->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
unsigned
TargetTransformInfo::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
unsigned Alignment,
unsigned AddressSpace) const {
return TTIImpl->getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}
unsigned TargetTransformInfo::getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
unsigned Alignment, unsigned AddressSpace) const {
return TTIImpl->getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
Alignment, AddressSpace);
}
unsigned
TargetTransformInfo::getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
ArrayRef<Type *> Tys) const {
return TTIImpl->getIntrinsicInstrCost(ID, RetTy, Tys);
}
unsigned TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys) const {
return TTIImpl->getCallInstrCost(F, RetTy, Tys);
}
unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
return TTIImpl->getNumberOfParts(Tp);
}
unsigned TargetTransformInfo::getAddressComputationCost(Type *Tp,
bool IsComplex) const {
return TTIImpl->getAddressComputationCost(Tp, IsComplex);
}
unsigned TargetTransformInfo::getReductionCost(unsigned Opcode, Type *Ty,
bool IsPairwiseForm) const {
return TTIImpl->getReductionCost(Opcode, Ty, IsPairwiseForm);
}
unsigned
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const {
return TTIImpl->getCostOfKeepingLiveOverCall(Tys);
}
bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
MemIntrinsicInfo &Info) const {
return TTIImpl->getTgtMemIntrinsic(Inst, Info);
}
Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
IntrinsicInst *Inst, Type *ExpectedType) const {
return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
bool TargetTransformInfo::hasCompatibleFunctionAttributes(
const Function *Caller, const Function *Callee) const {
return TTIImpl->hasCompatibleFunctionAttributes(Caller, Callee);
}
TargetTransformInfo::Concept::~Concept() {}
TargetIRAnalysis::TargetIRAnalysis() : TTICallback(&getDefaultTTI) {}
TargetIRAnalysis::TargetIRAnalysis(
std::function<Result(Function &)> TTICallback)
: TTICallback(TTICallback) {}
TargetIRAnalysis::Result TargetIRAnalysis::run(Function &F) {
return TTICallback(F);
}
char TargetIRAnalysis::PassID;
TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(Function &F) {
return Result(F.getParent()->getDataLayout());
}
// Register the basic pass.
INITIALIZE_PASS(TargetTransformInfoWrapperPass, "tti",
"Target Transform Information", false, true)
char TargetTransformInfoWrapperPass::ID = 0;
void TargetTransformInfoWrapperPass::anchor() {}
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
: ImmutablePass(ID) {
initializeTargetTransformInfoWrapperPassPass(
*PassRegistry::getPassRegistry());
}
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
TargetIRAnalysis TIRA)
: ImmutablePass(ID), TIRA(std::move(TIRA)) {
initializeTargetTransformInfoWrapperPassPass(
*PassRegistry::getPassRegistry());
}
TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(Function &F) {
TTI = TIRA.run(F);
return *TTI;
}
ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) {
return new TargetTransformInfoWrapperPass(std::move(TIRA));
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/ScopedNoAliasAA.cpp | //===- ScopedNoAliasAA.cpp - Scoped No-Alias Alias Analysis ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the ScopedNoAlias alias-analysis pass, which implements
// metadata-based scoped no-alias support.
//
// Alias-analysis scopes are defined by an id (which can be a string or some
// other metadata node), a domain node, and an optional descriptive string.
// A domain is defined by an id (which can be a string or some other metadata
// node), and an optional descriptive string.
//
// !dom0 = metadata !{ metadata !"domain of foo()" }
// !scope1 = metadata !{ metadata !scope1, metadata !dom0, metadata !"scope 1" }
// !scope2 = metadata !{ metadata !scope2, metadata !dom0, metadata !"scope 2" }
//
// Loads and stores can be tagged with an alias-analysis scope, and also, with
// a noalias tag for a specific scope:
//
// ... = load %ptr1, !alias.scope !{ !scope1 }
// ... = load %ptr2, !alias.scope !{ !scope1, !scope2 }, !noalias !{ !scope1 }
//
// When evaluating an aliasing query, if one of the instructions has a set of
// noalias scopes in some domain that is a superset of the alias scopes in
// that domain of some other instruction, then the two memory accesses are
// assumed not to alias.
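//
// For example, with the loads shown above, every !alias.scope entry of the
// first load in domain !dom0 ({ !scope1 }) is contained in the second load's
// !noalias set for that domain ({ !scope1 }), so an aliasing query between
// the two loads returns NoAlias.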
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
// A handy option for disabling scoped no-alias functionality. The same effect
// can also be achieved by stripping the associated metadata tags from IR, but
// this option is sometimes more convenient.
#if 0 // HLSL Change Starts - option pending
static cl::opt<bool>
EnableScopedNoAlias("enable-scoped-noalias", cl::init(true));
#else
static const bool EnableScopedNoAlias = true;
#endif // HLSL Change Ends
namespace {
/// AliasScopeNode - This is a simple wrapper around an MDNode which provides
/// a higher-level interface by hiding the details of how alias analysis
/// information is encoded in its operands.
class AliasScopeNode {
const MDNode *Node;
public:
  AliasScopeNode() : Node(nullptr) {}
explicit AliasScopeNode(const MDNode *N) : Node(N) {}
/// getNode - Get the MDNode for this AliasScopeNode.
const MDNode *getNode() const { return Node; }
/// getDomain - Get the MDNode for this AliasScopeNode's domain.
const MDNode *getDomain() const {
if (Node->getNumOperands() < 2)
return nullptr;
return dyn_cast_or_null<MDNode>(Node->getOperand(1));
}
};
/// ScopedNoAliasAA - This is a simple alias analysis
/// implementation that uses scoped-noalias metadata to answer queries.
class ScopedNoAliasAA : public ImmutablePass, public AliasAnalysis {
public:
static char ID; // Class identification, replacement for typeinfo
ScopedNoAliasAA() : ImmutablePass(ID) {
initializeScopedNoAliasAAPass(*PassRegistry::getPassRegistry());
}
bool doInitialization(Module &M) override;
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
void *getAdjustedAnalysisPointer(const void *PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
protected:
bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
void collectMDInDomain(const MDNode *List, const MDNode *Domain,
SmallPtrSetImpl<const MDNode *> &Nodes) const;
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
AliasResult alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) override;
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override;
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
ModRefBehavior getModRefBehavior(const Function *F) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override;
};
} // End of anonymous namespace
// Register this pass...
char ScopedNoAliasAA::ID = 0;
INITIALIZE_AG_PASS(ScopedNoAliasAA, AliasAnalysis, "scoped-noalias",
"Scoped NoAlias Alias Analysis", false, true, false)
ImmutablePass *llvm::createScopedNoAliasAAPass() {
return new ScopedNoAliasAA();
}
bool ScopedNoAliasAA::doInitialization(Module &M) {
InitializeAliasAnalysis(this, &M.getDataLayout());
return true;
}
void
ScopedNoAliasAA::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AliasAnalysis::getAnalysisUsage(AU);
}
void
ScopedNoAliasAA::collectMDInDomain(const MDNode *List, const MDNode *Domain,
SmallPtrSetImpl<const MDNode *> &Nodes) const {
for (unsigned i = 0, ie = List->getNumOperands(); i != ie; ++i)
if (const MDNode *MD = dyn_cast<MDNode>(List->getOperand(i)))
if (AliasScopeNode(MD).getDomain() == Domain)
Nodes.insert(MD);
}
bool
ScopedNoAliasAA::mayAliasInScopes(const MDNode *Scopes,
const MDNode *NoAlias) const {
if (!Scopes || !NoAlias)
return true;
// Collect the set of scope domains relevant to the noalias scopes.
SmallPtrSet<const MDNode *, 16> Domains;
for (unsigned i = 0, ie = NoAlias->getNumOperands(); i != ie; ++i)
if (const MDNode *NAMD = dyn_cast<MDNode>(NoAlias->getOperand(i)))
if (const MDNode *Domain = AliasScopeNode(NAMD).getDomain())
Domains.insert(Domain);
// We alias unless, for some domain, the set of noalias scopes in that domain
// is a superset of the set of alias scopes in that domain.
for (const MDNode *Domain : Domains) {
SmallPtrSet<const MDNode *, 16> NANodes, ScopeNodes;
collectMDInDomain(NoAlias, Domain, NANodes);
collectMDInDomain(Scopes, Domain, ScopeNodes);
    if (ScopeNodes.empty())
continue;
// To not alias, all of the nodes in ScopeNodes must be in NANodes.
bool FoundAll = true;
for (const MDNode *SMD : ScopeNodes)
if (!NANodes.count(SMD)) {
FoundAll = false;
break;
}
if (FoundAll)
return false;
}
return true;
}
AliasResult ScopedNoAliasAA::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) {
if (!EnableScopedNoAlias)
return AliasAnalysis::alias(LocA, LocB);
// Get the attached MDNodes.
const MDNode *AScopes = LocA.AATags.Scope,
*BScopes = LocB.AATags.Scope;
const MDNode *ANoAlias = LocA.AATags.NoAlias,
*BNoAlias = LocB.AATags.NoAlias;
if (!mayAliasInScopes(AScopes, BNoAlias))
return NoAlias;
if (!mayAliasInScopes(BScopes, ANoAlias))
return NoAlias;
// If they may alias, chain to the next AliasAnalysis.
return AliasAnalysis::alias(LocA, LocB);
}
bool ScopedNoAliasAA::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
AliasAnalysis::ModRefBehavior
ScopedNoAliasAA::getModRefBehavior(ImmutableCallSite CS) {
return AliasAnalysis::getModRefBehavior(CS);
}
AliasAnalysis::ModRefBehavior
ScopedNoAliasAA::getModRefBehavior(const Function *F) {
return AliasAnalysis::getModRefBehavior(F);
}
AliasAnalysis::ModRefResult
ScopedNoAliasAA::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
if (!EnableScopedNoAlias)
return AliasAnalysis::getModRefInfo(CS, Loc);
if (!mayAliasInScopes(Loc.AATags.Scope, CS.getInstruction()->getMetadata(
LLVMContext::MD_noalias)))
return NoModRef;
if (!mayAliasInScopes(
CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
Loc.AATags.NoAlias))
return NoModRef;
return AliasAnalysis::getModRefInfo(CS, Loc);
}
AliasAnalysis::ModRefResult
ScopedNoAliasAA::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
if (!EnableScopedNoAlias)
return AliasAnalysis::getModRefInfo(CS1, CS2);
if (!mayAliasInScopes(
CS1.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS2.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
return NoModRef;
if (!mayAliasInScopes(
CS2.getInstruction()->getMetadata(LLVMContext::MD_alias_scope),
CS1.getInstruction()->getMetadata(LLVMContext::MD_noalias)))
return NoModRef;
return AliasAnalysis::getModRefInfo(CS1, CS2);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/VectorUtils2.cpp | //===----------- VectorUtils2.cpp - findScalarElement function -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines vectorizer utility function findScalarElement.
// Splitting this function from VectorUtils.cpp into a separate file
// makes dxilconv.dll 121kB smaller (x86 release, compiler optimization for
// size).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
/// \brief Given a vector and an element number, see if the scalar value is
/// already around as a register, for example if it were inserted then extracted
/// from the vector.
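/// For example, if %V = insertelement <4 x float> %W, float %f, i32 2, then
/// findScalarElement(%V, 2) returns %f, while findScalarElement(%V, 0)
/// recurses into %W.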
llvm::Value *llvm::findScalarElement(llvm::Value *V, unsigned EltNo) {
assert(V->getType()->isVectorTy() && "Not looking at a vector?");
VectorType *VTy = cast<VectorType>(V->getType());
unsigned Width = VTy->getNumElements();
if (EltNo >= Width) // Out of range access.
return UndefValue::get(VTy->getElementType());
if (Constant *C = dyn_cast<Constant>(V))
return C->getAggregateElement(EltNo);
if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
// If this is an insert to a variable element, we don't know what it is.
if (!isa<ConstantInt>(III->getOperand(2)))
return nullptr;
unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
// If this is an insert to the element we are looking for, return the
// inserted value.
if (EltNo == IIElt)
return III->getOperand(1);
// Otherwise, the insertelement doesn't modify the value, recurse on its
// vector input.
return findScalarElement(III->getOperand(0), EltNo);
}
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
int InEl = SVI->getMaskValue(EltNo);
if (InEl < 0)
return UndefValue::get(VTy->getElementType());
if (InEl < (int)LHSWidth)
return findScalarElement(SVI->getOperand(0), InEl);
return findScalarElement(SVI->getOperand(1), InEl - LHSWidth);
}
// Extract a value from a vector add operation with a constant zero.
Value *Val = nullptr;
Constant *Con = nullptr;
if (match(V,
llvm::PatternMatch::m_Add(llvm::PatternMatch::m_Value(Val),
llvm::PatternMatch::m_Constant(Con)))) {
if (Constant *Elt = Con->getAggregateElement(EltNo))
if (Elt->isNullValue())
return findScalarElement(Val, EltNo);
}
// Otherwise, we don't know.
return nullptr;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/ConstantFolding.cpp | //===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cerrno>
#include <cmath>
#include "llvm/Analysis/DxilConstantFolding.h" // HLSL Change
#ifdef HAVE_FENV_H
#include <fenv.h>
#endif
using namespace llvm;
//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
// Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy);
if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
!DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
return Constant::getAllOnesValue(DestTy);
// Handle a vector->integer cast.
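  // e.g., on a little-endian target, bitcast <2 x i16> <i16 1, i16 2> to i32
  // folds to i32 0x00020001.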
if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
VectorType *VTy = dyn_cast<VectorType>(C->getType());
if (!VTy)
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = VTy->getNumElements();
Type *SrcEltTy = VTy->getElementType();
// If the vector is a vector of floating point, convert it to vector of int
// to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
// Ask IR to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
}
ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
if (!CDV)
return ConstantExpr::getBitCast(C, DestTy);
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = DL.getTypeAllocSizeInBits(SrcEltTy);
APInt Result(IT->getBitWidth(), 0);
for (unsigned i = 0; i != NumSrcElts; ++i) {
Result <<= BitShift;
if (DL.isLittleEndian())
Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
else
Result |= CDV->getElementAsInteger(i);
}
return ConstantInt::get(IT, Result);
}
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (!DestVTy)
return ConstantExpr::getBitCast(C, DestTy);
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
}
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
// If the element types match, IR can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
// Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  // First things first. We only want to think about integers here, so if
// we have something in FP form, recast it as integer.
if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
Type *DestIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, DL);
// Finally, IR can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
Type *SrcIVTy =
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
// Ask IR to do the conversion now that #elts line up.
C = ConstantExpr::getBitCast(C, SrcIVTy);
// If IR wasn't able to fold it, bail out.
if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
!isa<ConstantDataVector>(C))
return C;
}
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = DL.isLittleEndian();
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
Constant *Zero = Constant::getNullValue(DstEltTy);
unsigned Ratio = NumSrcElt/NumDstElt;
unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
unsigned SrcElt = 0;
for (unsigned i = 0; i != NumDstElt; ++i) {
// Build each element of the result.
Constant *Elt = Zero;
unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
// Shift it to the right place, depending on endianness.
Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
Result.push_back(Elt);
}
return ConstantVector::get(Result);
}
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
// Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
if (DstEltTy->isPointerTy()) {
IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
continue;
}
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
return ConstantVector::get(Result);
}
/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
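/// e.g., for getelementptr (i8, i8* @G, i64 8) it returns @G with an Offset
/// of 8.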
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
APInt &Offset, const DataLayout &DL) {
// Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) {
unsigned BitWidth = DL.getPointerTypeSizeInBits(GV->getType());
Offset = APInt(BitWidth, 0);
return true;
}
// Otherwise, if this isn't a constant expr, bail out.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast ||
CE->getOpcode() == Instruction::AddrSpaceCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);
// i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
GEPOperator *GEP = dyn_cast<GEPOperator>(CE);
if (!GEP)
return false;
unsigned BitWidth = DL.getPointerTypeSizeInBits(GEP->getType());
APInt TmpOffset(BitWidth, 0);
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
return false;
// Otherwise, add any offset that our operands provide.
if (!GEP->accumulateConstantOffset(DL, TmpOffset))
return false;
Offset = TmpOffset;
return true;
}
/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
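/// e.g., reading from an i32 constant 0x04030201 at ByteOffset 1 on a
/// little-endian target fills CurPtr with the bytes 0x02, 0x03, 0x04.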
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
unsigned char *CurPtr, unsigned BytesLeft,
const DataLayout &DL) {
assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
"Out of range access");
// If this element is zero or undefined, we can just return since *CurPtr is
// zero initialized.
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
uint64_t Val = CI->getZExtValue();
unsigned IntBytes = unsigned(CI->getBitWidth()/8);
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
int n = ByteOffset;
if (!DL.isLittleEndian())
n = IntBytes - n - 1;
CurPtr[i] = (unsigned char)(Val >> (n * 8));
++ByteOffset;
}
return true;
}
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
}
if (CFP->getType()->isFloatTy()){
C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
}
if (CFP->getType()->isHalfTy()){
C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
}
return false;
}
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = DL.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());
if (ByteOffset < EltSize &&
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, DL))
return false;
++Index;
      // Check to see if we read from the last struct element; if so, we're done.
if (Index == CS->getType()->getNumElements())
return true;
// If we read all of the bytes we needed from this element we're done.
uint64_t NextEltOffset = SL->getElementOffset(Index);
if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
return true;
// Move to the next element of the struct.
CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
ByteOffset = 0;
CurEltOffset = NextEltOffset;
}
// not reached.
}
if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
isa<ConstantDataSequential>(C)) {
Type *EltTy = C->getType()->getSequentialElementType();
uint64_t EltSize = DL.getTypeAllocSize(EltTy);
uint64_t Index = ByteOffset / EltSize;
uint64_t Offset = ByteOffset - Index * EltSize;
uint64_t NumElts;
if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
NumElts = AT->getNumElements();
else
NumElts = C->getType()->getVectorNumElements();
for (; Index != NumElts; ++Index) {
if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
BytesLeft, DL))
return false;
uint64_t BytesWritten = EltSize - Offset;
assert(BytesWritten <= EltSize && "Not indexing into this element?");
if (BytesWritten >= BytesLeft)
return true;
Offset = 0;
BytesLeft -= BytesWritten;
CurPtr += BytesWritten;
}
return true;
}
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, DL);
}
}
// Otherwise, unknown initializer type.
return false;
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const DataLayout &DL) {
PointerType *PTy = cast<PointerType>(C->getType());
Type *LoadTy = PTy->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
unsigned AS = PTy->getAddressSpace();
// If this is a float/double load, we can try folding it as an int32/64 load
// and then bitcast the result. This can be useful for union cases. Note
// that address spaces don't matter here since we're not going to result in
// an actual new load.
Type *MapTy;
if (LoadTy->isHalfTy())
MapTy = Type::getInt16PtrTy(C->getContext(), AS);
else if (LoadTy->isFloatTy())
MapTy = Type::getInt32PtrTy(C->getContext(), AS);
else if (LoadTy->isDoubleTy())
MapTy = Type::getInt64PtrTy(C->getContext(), AS);
else if (LoadTy->isVectorTy()) {
MapTy = PointerType::getIntNPtrTy(C->getContext(),
DL.getTypeAllocSizeInBits(LoadTy), AS);
} else
return nullptr;
C = FoldBitCast(C, MapTy, DL);
if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, DL))
return FoldBitCast(Res, LoadTy, DL);
return nullptr;
}
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0)
return nullptr;
GlobalValue *GVal;
APInt Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, DL))
return nullptr;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
return nullptr;
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset.isNegative())
return nullptr;
// If we're not accessing anything in this constant, the result is undefined.
if (Offset.getZExtValue() >=
DL.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
BytesLoaded, DL))
return nullptr;
APInt ResultVal = APInt(IntType->getBitWidth(), 0);
if (DL.isLittleEndian()) {
ResultVal = RawBytes[BytesLoaded - 1];
for (unsigned i = 1; i != BytesLoaded; ++i) {
ResultVal <<= 8;
ResultVal |= RawBytes[BytesLoaded - 1 - i];
}
} else {
ResultVal = RawBytes[0];
for (unsigned i = 1; i != BytesLoaded; ++i) {
ResultVal <<= 8;
ResultVal |= RawBytes[i];
}
}
return ConstantInt::get(IntType->getContext(), ResultVal);
}
static Constant *ConstantFoldLoadThroughBitcast(ConstantExpr *CE,
const DataLayout &DL) {
auto *DestPtrTy = dyn_cast<PointerType>(CE->getType());
if (!DestPtrTy)
return nullptr;
Type *DestTy = DestPtrTy->getElementType();
Constant *C = ConstantFoldLoadFromConstPtr(CE->getOperand(0), DL);
if (!C)
return nullptr;
do {
Type *SrcTy = C->getType();
// If the type sizes are the same and a cast is legal, just directly
// cast the constant.
if (DL.getTypeSizeInBits(DestTy) == DL.getTypeSizeInBits(SrcTy)) {
Instruction::CastOps Cast = Instruction::BitCast;
// If we are going from a pointer to int or vice versa, we spell the cast
// differently.
if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
Cast = Instruction::IntToPtr;
else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
Cast = Instruction::PtrToInt;
if (CastInst::castIsValid(Cast, C, DestTy))
return ConstantExpr::getCast(Cast, C, DestTy);
}
// If this isn't an aggregate type, there is nothing we can do to drill down
// and find a bitcastable constant.
if (!SrcTy->isAggregateType())
return nullptr;
// We're simulating a load through a pointer that was bitcast to point to
// a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
// castable to implement the "load" semantic model.
C = C->getAggregateElement(0u);
} while (C);
return nullptr;
}
/// Return the value that a load from C would produce if it is constant and
/// determinable. If this is not determinable, return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
const DataLayout &DL) {
// First, try the easy cases:
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
return GV->getInitializer();
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE)
return nullptr;
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return V;
}
}
}
if (CE->getOpcode() == Instruction::BitCast)
if (Constant *LoadedC = ConstantFoldLoadThroughBitcast(CE, DL))
return LoadedC;
  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
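  // e.g., an i32 load through a bitcast of the constant string "abc\0" folds
  // directly to i32 0x00636261 on a little-endian target.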
StringRef Str;
if (getConstantStringInfo(CE, Str) && !Str.empty()) {
unsigned StrLen = Str.size();
Type *Ty = cast<PointerType>(CE->getType())->getElementType();
unsigned NumBits = Ty->getPrimitiveSizeInBits();
// Replace load with immediate integer if the result is an integer or fp
// value.
if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
(isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
APInt StrVal(NumBits, 0);
APInt SingleChar(NumBits, 0);
if (DL.isLittleEndian()) {
for (signed i = StrLen-1; i >= 0; i--) {
SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
StrVal = (StrVal << 8) | SingleChar;
}
} else {
for (unsigned i = 0; i < StrLen; i++) {
SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
StrVal = (StrVal << 8) | SingleChar;
}
// Append NULL at the end.
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
if (Ty->isFloatingPointTy())
Res = ConstantExpr::getBitCast(Res, Ty);
return Res;
}
}
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
Type *ResTy = cast<PointerType>(C->getType())->getElementType();
if (GV->getInitializer()->isNullValue())
return Constant::getNullValue(ResTy);
if (isa<UndefValue>(GV->getInitializer()))
return UndefValue::get(ResTy);
}
}
// Try hard to fold loads from bitcasted strange and non-type-safe things.
return FoldReinterpretLoadFromConstPtr(CE, DL);
}
static Constant *ConstantFoldLoadInst(const LoadInst *LI,
const DataLayout &DL) {
if (LI->isVolatile()) return nullptr;
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, DL);
return nullptr;
}
/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1,
const DataLayout &DL) {
// SROA
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
if (Opc == Instruction::And) {
unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
computeKnownBits(Op1, KnownZero1, KnownOne1, DL);
if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
// All the bits of Op0 that the 'and' could be masking are already zero.
return Op0;
}
if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
// All the bits of Op1 that the 'and' could be masking are already zero.
return Op1;
}
APInt KnownZero = KnownZero0 | KnownZero1;
APInt KnownOne = KnownOne0 & KnownOne1;
if ((KnownZero | KnownOne).isAllOnesValue()) {
return ConstantInt::get(Op0->getType(), KnownOne);
}
}
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub) {
GlobalValue *GV1, *GV2;
APInt Offs1, Offs2;
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());
// (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size
// first.
return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
Offs2.zextOrTrunc(OpSize));
}
}
return nullptr;
}
/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
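/// e.g., on a target with 64-bit pointers, an i32 index is sign-extended to
/// an i64 before being used in the getelementptr.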
static Constant *CastGEPIndices(Type *SrcTy, ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
Type *IntPtrTy = DL.getIntPtrType(ResultTy);
bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
if ((i == 1 ||
!isa<StructType>(GetElementPtrInst::getIndexedType(
cast<PointerType>(Ops[0]->getType()->getScalarType())
->getElementType(),
Ops.slice(1, i - 1)))) &&
Ops[i]->getType() != IntPtrTy) {
Any = true;
NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
true,
IntPtrTy,
true),
Ops[i], IntPtrTy));
} else
NewIdxs.push_back(Ops[i]);
}
if (!Any)
return nullptr;
Constant *C = ConstantExpr::getGetElementPtr(SrcTy, Ops[0], NewIdxs);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
}
return C;
}
/// Strip the pointer casts, but preserve the address space information.
static Constant* StripPtrCastKeepAS(Constant* Ptr) {
assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
Ptr = Ptr->stripPointerCasts();
PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());
// Preserve the address space number of the pointer.
if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
OldPtrTy->getAddressSpace());
Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
}
return Ptr;
}
/// If we can symbolically evaluate the GEP constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Type *SrcTy, ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0];
if (!Ptr->getType()->getPointerElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return nullptr;
Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
Type *ResultElementTy = ResultTy->getPointerElementType();
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
if (!isa<ConstantInt>(Ops[i])) {
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
assert((!CE || CE->getType() == IntPtrTy) &&
"CastGEPIndices didn't canonicalize index types!");
if (CE && CE->getOpcode() == Instruction::Sub &&
CE->getOperand(0)->isNullValue()) {
Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
Res = ConstantExpr::getSub(Res, CE->getOperand(1));
Res = ConstantExpr::getIntToPtr(Res, ResultTy);
if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
Res = ConstantFoldConstantExpression(ResCE, DL, TLI);
return Res;
}
}
return nullptr;
}
unsigned BitWidth = DL.getTypeSizeInBits(IntPtrTy);
APInt Offset =
APInt(BitWidth,
DL.getIndexedOffset(
Ptr->getType(),
makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
Ptr = StripPtrCastKeepAS(Ptr);
// If this is a GEP of a GEP, fold it all into a single GEP.
while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());
    // Do not try to incorporate the sub-GEP if some index is not a number.
bool AllConstantInt = true;
for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
if (!isa<ConstantInt>(NestedOps[i])) {
AllConstantInt = false;
break;
}
if (!AllConstantInt)
break;
Ptr = cast<Constant>(GEP->getOperand(0));
Offset += APInt(BitWidth, DL.getIndexedOffset(Ptr->getType(), NestedOps));
Ptr = StripPtrCastKeepAS(Ptr);
}
// If the base value for this address is a literal integer value, fold the
// getelementptr to the resulting integer value casted to the pointer type.
APInt BasePtr(BitWidth, 0);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
if (CE->getOpcode() == Instruction::IntToPtr) {
if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
BasePtr = Base->getValue().zextOrTrunc(BitWidth);
}
}
if (Ptr->isNullValue() || BasePtr != 0) {
Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
return ConstantExpr::getIntToPtr(C, ResultTy);
}
// Otherwise form a regular getelementptr. Recompute the indices so that
// we eliminate over-indexing of the notional static type array bounds.
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
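  // e.g., a 28-byte offset into [5 x i32]* @a is rebuilt as
  // getelementptr @a, i32 1, i32 2 rather than the over-indexed
  // getelementptr @a, i32 0, i32 7.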
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
SmallVector<Constant *, 32> NewIdxs;
do {
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
if (ATy->isPointerTy()) {
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
break;
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
return nullptr;
}
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, DL.getTypeAllocSize(ATy->getElementType()));
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
// accommodate the offset.
NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
APInt NewIdx = Offset.udiv(ElemSize);
Offset -= NewIdx * ElemSize;
NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
}
Ty = ATy->getElementType();
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
// If we end up with an offset that isn't valid for this struct type, we
// can't re-form this GEP in a regular form, so bail out. The pointer
// operand likely went through casts that are necessary to make the GEP
// sensible.
const StructLayout &SL = *DL.getStructLayout(STy);
if (Offset.uge(SL.getSizeInBytes()))
break;
// Determine which field of the struct the offset points into. The
// getZExtValue is fine as we've already ensured that the offset is
// within the range representable by the StructLayout API.
unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
ElIdx));
Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
Ty = STy->getTypeAtIndex(ElIdx);
} else {
// We've reached some non-indexable type.
break;
}
} while (Ty != ResultElementTy);
// If we haven't used up the entire offset by descending the static
// type, then the offset is pointing into the middle of an indivisible
// member, so we can't simplify it.
if (Offset != 0)
return nullptr;
// Create a GEP.
Constant *C = ConstantExpr::getGetElementPtr(SrcTy, Ptr, NewIdxs);
assert(C->getType()->getPointerElementType() == Ty &&
"Computed GetElementPtr has unexpected type!");
// If we ended up indexing a member with a type that doesn't match
// the type of what the original indices indexed, add a cast.
if (Ty != ResultElementTy)
C = FoldBitCast(C, ResultTy, DL);
return C;
}
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
/// Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
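///
/// A minimal usage sketch (hypothetical caller; `Inst` is an instruction
/// whose operands are all constants):
///   if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
///     Inst->replaceAllUsesWith(C);
///     Inst->eraseFromParent();
///   }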
Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
Constant *CommonValue = nullptr;
for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
if (isa<UndefValue>(Incoming))
continue;
// If the incoming value is not a constant, then give up.
Constant *C = dyn_cast<Constant>(Incoming);
if (!C)
return nullptr;
// Fold the PHI's operands.
if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(NewC, DL, TLI);
// If the incoming value is a different constant to
// the one we saw previously, then give up.
if (CommonValue && C != CommonValue)
return nullptr;
CommonValue = C;
}
// If we reach here, all incoming values are the same constant or undef.
return CommonValue ? CommonValue : UndefValue::get(PN->getType());
}
// Scan the operand list, checking to see if they are all constants, if so,
// hand off to ConstantFoldInstOperands.
SmallVector<Constant*, 8> Ops;
for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
Constant *Op = dyn_cast<Constant>(*i);
if (!Op)
return nullptr; // All operands not constant!
// Fold the Instruction's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
Op = ConstantFoldConstantExpression(NewCE, DL, TLI);
Ops.push_back(Op);
}
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
DL, TLI);
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, DL);
if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I)) {
return ConstantExpr::getInsertValue(
cast<Constant>(IVI->getAggregateOperand()),
cast<Constant>(IVI->getInsertedValueOperand()),
IVI->getIndices());
}
if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I)) {
return ConstantExpr::getExtractValue(
cast<Constant>(EVI->getAggregateOperand()),
EVI->getIndices());
}
return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, DL, TLI);
}
static Constant *
ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout &DL,
const TargetLibraryInfo *TLI,
SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
SmallVector<Constant *, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
++i) {
Constant *NewC = cast<Constant>(*i);
// Recursively fold the ConstantExpr's operands. If we have already folded
// a ConstantExpr, we don't have to process it again.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
if (FoldedOps.insert(NewCE).second)
NewC = ConstantFoldConstantExpressionImpl(NewCE, DL, TLI, FoldedOps);
}
Ops.push_back(NewC);
}
if (CE->isCompare())
return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
DL, TLI);
return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, DL, TLI);
}
/// Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
SmallPtrSet<ConstantExpr *, 4> FoldedOps;
return ConstantFoldConstantExpressionImpl(CE, DL, TLI, FoldedOps);
}
/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
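/// For example (illustrative; I32Ty, DL, and TLI are assumed to be in scope):
///   Constant *Ops[] = {ConstantInt::get(I32Ty, 2), ConstantInt::get(I32Ty, 3)};
///   ConstantFoldInstOperands(Instruction::Add, I32Ty, Ops, DL, TLI); // i32 5
/// Any nsw/nuw flags on the original instruction would be lost, per the TODO.
///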
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1])) {
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], DL))
return C;
}
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
switch (Opcode) {
default: return nullptr;
case Instruction::ICmp:
case Instruction::FCmp: llvm_unreachable("Invalid for compares");
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
return nullptr;
case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
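    // Worked example (illustrative): with 32-bit pointers, folding
    //   ptrtoint (inttoptr (i64 C to i8*) to i64)
    // first masks C to its low 32 bits -- the only bits a pointer can carry --
    // and then integer-casts the masked value to the destination type.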
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
if (CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
if (PtrWidth < InWidth) {
Constant *Mask =
ConstantInt::get(CE->getContext(),
APInt::getLowBitsSet(InWidth, PtrWidth));
Input = ConstantExpr::getAnd(Input, Mask);
}
// Do a zext or trunc to get to the dest size.
return ConstantExpr::getIntegerCast(Input, DestTy, false);
}
}
return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
case Instruction::IntToPtr:
// If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
// the int size is >= the ptr size and the address spaces are the same.
// This requires knowing the width of a pointer, so it can't be done in
// ConstantExpr::getCast.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
if (CE->getOpcode() == Instruction::PtrToInt) {
Constant *SrcPtr = CE->getOperand(0);
unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
if (MidIntSize >= SrcPtrSize) {
unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
if (SrcAS == DestTy->getPointerAddressSpace())
return FoldBitCast(CE->getOperand(0), DestTy, DL);
}
}
}
return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::UIToFP:
case Instruction::SIToFP:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::AddrSpaceCast:
return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
case Instruction::BitCast:
return FoldBitCast(Ops[0], DestTy, DL);
case Instruction::Select:
return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
case Instruction::ExtractElement:
return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
case Instruction::InsertElement:
return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr: {
Type *SrcTy = nullptr;
if (Constant *C = CastGEPIndices(SrcTy, Ops, DestTy, DL, TLI))
return C;
if (Constant *C = SymbolicallyEvaluateGEP(SrcTy, Ops, DestTy, DL, TLI))
return C;
return ConstantExpr::getGetElementPtr(SrcTy, Ops[0], Ops.slice(1));
}
}
}
/// Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Ops0, Constant *Ops1,
const DataLayout &DL,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
// fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
// fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
//
  // FIXME: The following comment is out of date; the DataLayout is available now.
// ConstantExpr::getCompare cannot do this, because it doesn't have DL
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
if (Ops1->isNullValue()) {
if (CE0->getOpcode() == Instruction::IntToPtr) {
Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
IntPtrTy, false);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
}
      // Only do this transformation if the int is the same width as IntPtrTy;
      // otherwise there is a truncation or extension that we aren't modeling.
if (CE0->getOpcode() == Instruction::PtrToInt) {
Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
if (CE0->getType() == IntPtrTy) {
Constant *C = CE0->getOperand(0);
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
}
}
}
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (CE0->getOpcode() == CE1->getOpcode()) {
if (CE0->getOpcode() == Instruction::IntToPtr) {
Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
IntPtrTy, false);
Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
IntPtrTy, false);
return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
}
        // Only do this transformation if the int is the same width as IntPtrTy;
        // otherwise there is a truncation or extension that we aren't modeling.
if (CE0->getOpcode() == Instruction::PtrToInt) {
Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
if (CE0->getType() == IntPtrTy &&
CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
return ConstantFoldCompareInstOperands(
Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
}
}
}
}
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
Constant *LHS = ConstantFoldCompareInstOperands(
Predicate, CE0->getOperand(0), Ops1, DL, TLI);
Constant *RHS = ConstantFoldCompareInstOperands(
Predicate, CE0->getOperand(1), Ops1, DL, TLI);
unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, DL, TLI);
}
}
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
/// Given a constant and a getelementptr constantexpr, return the constant value
/// being addressed by the constant expression, or null if something is funny
/// and we can't decide.
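///
/// For example (illustrative), given @g = constant {i32, i32} {i32 1, i32 2},
/// a load through getelementptr ({i32, i32}, {i32, i32}* @g, i32 0, i32 1)
/// folds to i32 2: operand 1 must be the leading zero index, and each later
/// index selects an aggregate element.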
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
return nullptr; // Do not allow stepping over the value!
// Loop over all of the operands, tracking down which value we are
// addressing.
for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
C = C->getAggregateElement(CE->getOperand(i));
if (!C)
return nullptr;
}
return C;
}
/// Given a constant and getelementptr indices (with an *implied* zero pointer
/// index that is not in the list), return the constant value being addressed by
/// a virtual load, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
ArrayRef<Constant*> Indices) {
// Loop over all of the operands, tracking down which value we are
// addressing.
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
C = C->getAggregateElement(Indices[i]);
if (!C)
return nullptr;
}
return C;
}
//===----------------------------------------------------------------------===//
// Constant Folding for Calls
//
/// Return true if it's even possible to fold a call to the specified function.
bool llvm::canConstantFoldCallTo(const Function *F) {
if (hlsl::CanConstantFoldCallTo(F)) // HLSL Change
return true;
switch (F->getIntrinsicID()) {
case Intrinsic::fabs:
case Intrinsic::minnum:
case Intrinsic::maxnum:
case Intrinsic::log:
case Intrinsic::log2:
case Intrinsic::log10:
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::floor:
case Intrinsic::ceil:
case Intrinsic::sqrt:
case Intrinsic::sin:
case Intrinsic::cos:
case Intrinsic::pow:
case Intrinsic::powi:
case Intrinsic::bswap:
case Intrinsic::ctpop:
case Intrinsic::ctlz:
case Intrinsic::cttz:
case Intrinsic::fma:
case Intrinsic::fmuladd:
case Intrinsic::copysign:
case Intrinsic::round:
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow:
case Intrinsic::convert_from_fp16:
case Intrinsic::convert_to_fp16:
#if 0 // HLSL Change - remove platform intrinsics
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
#endif // HLSL Change - remove platform intrinsics
return true;
default:
return false;
case 0: break;
}
if (!F->hasName())
return false;
StringRef Name = F->getName();
  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah", which strcmp would consider
  // equal to "cos" but which has length 8.
switch (Name[0]) {
default: return false;
case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" ||
           Name == "atan2";
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
case 'e':
return Name == "exp" || Name == "exp2";
case 'f':
return Name == "fabs" || Name == "fmod" || Name == "floor";
case 'l':
return Name == "log" || Name == "log10";
case 'p':
return Name == "pow";
case 's':
return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
Name == "sinf" || Name == "sqrtf";
case 't':
return Name == "tan" || Name == "tanh";
}
}
static Constant *GetConstantFoldFPValue(double V, Type *Ty) {
if (Ty->isHalfTy()) {
APFloat APF(V);
bool unused;
APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
return ConstantFP::get(Ty->getContext(), APF);
}
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold half/float/double");
}
namespace {
/// Clear the floating-point exception state.
static inline void llvm_fenv_clearexcept() {
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
feclearexcept(FE_ALL_EXCEPT);
#endif
errno = 0;
}
/// Test if a floating-point exception was raised.
static inline bool llvm_fenv_testexcept() {
int errno_val = errno;
if (errno_val == ERANGE || errno_val == EDOM)
return true;
#if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
return true;
#endif
return false;
}
} // End namespace
// HLSL Change: changed calling convention of NativeFP to __cdecl and made it non-static
Constant *llvm::ConstantFoldFP(double (__cdecl *NativeFP)(double), double V,
Type *Ty) {
llvm_fenv_clearexcept();
V = NativeFP(V);
if (llvm_fenv_testexcept()) {
llvm_fenv_clearexcept();
return nullptr;
}
return GetConstantFoldFPValue(V, Ty);
}
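// A usage sketch (hypothetical values): ConstantFoldFP(sin, 1.0, DoubleTy)
// evaluates the host's sin(1.0) and, if no floating-point exception or errno
// value was raised, wraps the result in a ConstantFP of the requested type.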
// HLSL Change: changed calling convention of NativeFP to __cdecl
static Constant *ConstantFoldBinaryFP(double (__cdecl *NativeFP)(double, double),
double V, double W, Type *Ty) {
llvm_fenv_clearexcept();
V = NativeFP(V, W);
if (llvm_fenv_testexcept()) {
llvm_fenv_clearexcept();
return nullptr;
}
return GetConstantFoldFPValue(V, Ty);
}
#if 0 // HLSL Change - remove platform intrinsics
/// Attempt to fold an SSE floating point to integer conversion of a constant
/// floating point. If roundTowardZero is false, the default IEEE rounding is
/// used (toward nearest, ties to even). This matches the behavior of the
/// non-truncating SSE instructions in the default rounding mode. The desired
/// integer type Ty is used to select how many bits are available for the
/// result. Returns null if the conversion cannot be performed, otherwise
/// returns the Constant value resulting from the conversion.
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
bool roundTowardZero, Type *Ty) {
// All of these conversion intrinsics form an integer of at most 64bits.
unsigned ResultWidth = Ty->getIntegerBitWidth();
assert(ResultWidth <= 64 &&
"Can only constant fold conversions to 64 and 32 bit ints");
uint64_t UIntVal;
bool isExact = false;
APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
: APFloat::rmNearestTiesToEven;
APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
/*isSigned=*/true, mode,
&isExact);
if (status != APFloat::opOK && status != APFloat::opInexact)
return nullptr;
return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}
#endif // HLSL Change Ends
// HLSL Change - make non-static.
double llvm::getValueAsDouble(ConstantFP *Op) {
Type *Ty = Op->getType();
if (Ty->isFloatTy())
return Op->getValueAPF().convertToFloat();
if (Ty->isDoubleTy())
return Op->getValueAPF().convertToDouble();
bool unused;
APFloat APF = Op->getValueAPF();
APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
return APF.convertToDouble();
}
static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
Type *Ty, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI) {
if (Constant *C = hlsl::ConstantFoldScalarCall(Name, Ty, Operands)) // HLSL Change - Try hlsl constant folding first.
return C;
if (Operands.size() == 1) {
if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
if (IntrinsicID == Intrinsic::convert_to_fp16) {
APFloat Val(Op->getValueAPF());
bool lost = false;
Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);
return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
}
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
return nullptr;
if (IntrinsicID == Intrinsic::round) {
APFloat V = Op->getValueAPF();
V.roundToIntegral(APFloat::rmNearestTiesToAway);
return ConstantFP::get(Ty->getContext(), V);
}
      // We only fold functions with finite arguments. Folding NaN and inf is
      // likely to be aborted with an exception anyway, and some host libms
      // have known errors raising exceptions.
if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
return nullptr;
      // Currently APFloat versions of these functions do not exist, so we use
      // the host native double versions. Float versions are not called
      // directly but for all these it is true (float)(f((double)arg)) ==
      // f(arg). Long double not supported yet.
double V = getValueAsDouble(Op);
switch (IntrinsicID) {
default: break;
case Intrinsic::fabs:
return ConstantFoldFP(fabs, V, Ty);
case Intrinsic::log2:
return ConstantFoldFP(Log2, V, Ty);
case Intrinsic::log:
return ConstantFoldFP(log, V, Ty);
case Intrinsic::log10:
return ConstantFoldFP(log10, V, Ty);
case Intrinsic::exp:
return ConstantFoldFP(exp, V, Ty);
case Intrinsic::exp2:
return ConstantFoldFP(exp2, V, Ty);
case Intrinsic::floor:
return ConstantFoldFP(floor, V, Ty);
case Intrinsic::ceil:
return ConstantFoldFP(ceil, V, Ty);
case Intrinsic::sin:
return ConstantFoldFP(sin, V, Ty);
case Intrinsic::cos:
return ConstantFoldFP(cos, V, Ty);
}
if (!TLI)
return nullptr;
switch (Name[0]) {
case 'a':
if (Name == "acos" && TLI->has(LibFunc::acos))
return ConstantFoldFP(acos, V, Ty);
else if (Name == "asin" && TLI->has(LibFunc::asin))
return ConstantFoldFP(asin, V, Ty);
else if (Name == "atan" && TLI->has(LibFunc::atan))
return ConstantFoldFP(atan, V, Ty);
break;
case 'c':
if (Name == "ceil" && TLI->has(LibFunc::ceil))
return ConstantFoldFP(ceil, V, Ty);
else if (Name == "cos" && TLI->has(LibFunc::cos))
return ConstantFoldFP(cos, V, Ty);
else if (Name == "cosh" && TLI->has(LibFunc::cosh))
return ConstantFoldFP(cosh, V, Ty);
else if (Name == "cosf" && TLI->has(LibFunc::cosf))
return ConstantFoldFP(cos, V, Ty);
break;
case 'e':
if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
}
break;
case 'f':
if (Name == "fabs" && TLI->has(LibFunc::fabs))
return ConstantFoldFP(fabs, V, Ty);
else if (Name == "floor" && TLI->has(LibFunc::floor))
return ConstantFoldFP(floor, V, Ty);
break;
case 'l':
if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
return ConstantFoldFP(log, V, Ty);
else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
return ConstantFoldFP(log10, V, Ty);
else if (IntrinsicID == Intrinsic::sqrt &&
(Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
if (V >= -0.0)
return ConstantFoldFP(sqrt, V, Ty);
else {
// Unlike the sqrt definitions in C/C++, POSIX, and IEEE-754 - which
// all guarantee or favor returning NaN - the square root of a
// negative number is not defined for the LLVM sqrt intrinsic.
// This is because the intrinsic should only be emitted in place of
// libm's sqrt function when using "no-nans-fp-math".
return UndefValue::get(Ty);
}
}
break;
case 's':
if (Name == "sin" && TLI->has(LibFunc::sin))
return ConstantFoldFP(sin, V, Ty);
else if (Name == "sinh" && TLI->has(LibFunc::sinh))
return ConstantFoldFP(sinh, V, Ty);
else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
return ConstantFoldFP(sqrt, V, Ty);
else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
return ConstantFoldFP(sqrt, V, Ty);
else if (Name == "sinf" && TLI->has(LibFunc::sinf))
return ConstantFoldFP(sin, V, Ty);
break;
case 't':
if (Name == "tan" && TLI->has(LibFunc::tan))
return ConstantFoldFP(tan, V, Ty);
else if (Name == "tanh" && TLI->has(LibFunc::tanh))
return ConstantFoldFP(tanh, V, Ty);
break;
default:
break;
}
return nullptr;
}
if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
switch (IntrinsicID) {
case Intrinsic::bswap:
return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
case Intrinsic::ctpop:
return ConstantInt::get(Ty, Op->getValue().countPopulation());
case Intrinsic::convert_from_fp16: {
APFloat Val(APFloat::IEEEhalf, Op->getValue());
bool lost = false;
APFloat::opStatus status = Val.convert(
Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
// Conversion is always precise.
(void)status;
assert(status == APFloat::opOK && !lost &&
"Precision lost during fp16 constfolding");
return ConstantFP::get(Ty->getContext(), Val);
}
default:
return nullptr;
}
}
#if 0 // HLSL Change - remove platform intrinsics
// Support ConstantVector in case we have an Undef in the top.
if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (IntrinsicID) {
default: break;
case Intrinsic::x86_sse_cvtss2si:
case Intrinsic::x86_sse_cvtss2si64:
case Intrinsic::x86_sse2_cvtsd2si:
case Intrinsic::x86_sse2_cvtsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/false, Ty);
case Intrinsic::x86_sse_cvttss2si:
case Intrinsic::x86_sse_cvttss2si64:
case Intrinsic::x86_sse2_cvttsd2si:
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
#endif // HLSL Change - remove platform intrinsics
if (isa<UndefValue>(Operands[0])) {
if (IntrinsicID == Intrinsic::bswap)
return Operands[0];
return nullptr;
}
return nullptr;
}
if (Operands.size() == 2) {
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
return nullptr;
double Op1V = getValueAsDouble(Op1);
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return nullptr;
double Op2V = getValueAsDouble(Op2);
if (IntrinsicID == Intrinsic::pow) {
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
}
if (IntrinsicID == Intrinsic::copysign) {
APFloat V1 = Op1->getValueAPF();
APFloat V2 = Op2->getValueAPF();
V1.copySign(V2);
return ConstantFP::get(Ty->getContext(), V1);
}
if (IntrinsicID == Intrinsic::minnum) {
const APFloat &C1 = Op1->getValueAPF();
const APFloat &C2 = Op2->getValueAPF();
return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
}
if (IntrinsicID == Intrinsic::maxnum) {
const APFloat &C1 = Op1->getValueAPF();
const APFloat &C2 = Op2->getValueAPF();
return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
}
if (!TLI)
return nullptr;
if (Name == "pow" && TLI->has(LibFunc::pow))
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
if (Name == "fmod" && TLI->has(LibFunc::fmod))
return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
if (Name == "atan2" && TLI->has(LibFunc::atan2))
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
return ConstantFP::get(Ty->getContext(),
APFloat((float)std::pow((float)Op1V,
(int)Op2C->getZExtValue())));
if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(),
APFloat((float)std::pow((float)Op1V,
(int)Op2C->getZExtValue())));
if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
return ConstantFP::get(Ty->getContext(),
APFloat((double)std::pow((double)Op1V,
(int)Op2C->getZExtValue())));
}
return nullptr;
}
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (IntrinsicID) {
default: break;
case Intrinsic::sadd_with_overflow:
case Intrinsic::uadd_with_overflow:
case Intrinsic::ssub_with_overflow:
case Intrinsic::usub_with_overflow:
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
APInt Res;
bool Overflow;
switch (IntrinsicID) {
default: llvm_unreachable("Invalid case");
case Intrinsic::sadd_with_overflow:
Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
break;
case Intrinsic::uadd_with_overflow:
Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
break;
case Intrinsic::ssub_with_overflow:
Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
break;
case Intrinsic::usub_with_overflow:
Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
break;
case Intrinsic::smul_with_overflow:
Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
break;
case Intrinsic::umul_with_overflow:
Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
break;
}
Constant *Ops[] = {
ConstantInt::get(Ty->getContext(), Res),
ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
};
return ConstantStruct::get(cast<StructType>(Ty), Ops);
}
case Intrinsic::cttz:
if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
return UndefValue::get(Ty);
return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
case Intrinsic::ctlz:
if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
return UndefValue::get(Ty);
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
return nullptr;
}
return nullptr;
}
if (Operands.size() != 3)
return nullptr;
if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (const ConstantFP *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
switch (IntrinsicID) {
default: break;
case Intrinsic::fma:
case Intrinsic::fmuladd: {
APFloat V = Op1->getValueAPF();
APFloat::opStatus s = V.fusedMultiplyAdd(Op2->getValueAPF(),
Op3->getValueAPF(),
APFloat::rmNearestTiesToEven);
if (s != APFloat::opInvalidOp)
return ConstantFP::get(Ty->getContext(), V);
return nullptr;
}
}
}
}
}
return nullptr;
}
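// The vector path below folds a call lane by lane. For example (illustrative),
// @llvm.ctpop.v2i32 applied to <2 x i32> <i32 3, i32 255> gathers one scalar
// per lane, folds each with ConstantFoldScalarCall, and rebuilds the result
// vector <2 x i32> <i32 2, i32 8>.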
static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
VectorType *VTy,
ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI) {
SmallVector<Constant *, 4> Result(VTy->getNumElements());
SmallVector<Constant *, 4> Lane(Operands.size());
Type *Ty = VTy->getElementType();
for (unsigned I = 0, E = VTy->getNumElements(); I != E; ++I) {
// Gather a column of constants.
for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
Constant *Agg = Operands[J]->getAggregateElement(I);
if (!Agg)
return nullptr;
Lane[J] = Agg;
}
// Use the regular scalar folding to simplify this column.
Constant *Folded = ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI);
if (!Folded)
return nullptr;
Result[I] = Folded;
}
return ConstantVector::get(Result);
}
/// Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
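///
/// A minimal usage sketch (hypothetical caller; ArgConstants holds the
/// already-constant call arguments):
///   if (canConstantFoldCallTo(F))
///     if (Constant *C = ConstantFoldCall(F, ArgConstants, TLI))
///       ... // e.g. @llvm.fabs.f64(double -1.0) folds to double 1.0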
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI) {
if (!F->hasName())
return nullptr;
StringRef Name = F->getName();
Type *Ty = F->getReturnType();
if (VectorType *VTy = dyn_cast<VectorType>(Ty))
return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands, TLI);
return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI);
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/AliasAnalysisCounter.cpp | //===- AliasAnalysisCounter.cpp - Alias Analysis Query Counter ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass which can be used to count how many alias queries
// are being made and how the alias analysis implementation being used responds.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static cl::opt<bool>
PrintAll("count-aa-print-all-queries", cl::ReallyHidden, cl::init(true));
static cl::opt<bool>
PrintAllFailures("count-aa-print-all-failed-queries", cl::ReallyHidden);
namespace {
class AliasAnalysisCounter : public ModulePass, public AliasAnalysis {
unsigned No, May, Partial, Must;
unsigned NoMR, JustRef, JustMod, MR;
Module *M;
public:
static char ID; // Class identification, replacement for typeinfo
AliasAnalysisCounter() : ModulePass(ID) {
initializeAliasAnalysisCounterPass(*PassRegistry::getPassRegistry());
No = May = Partial = Must = 0;
NoMR = JustRef = JustMod = MR = 0;
}
void printLine(const char *Desc, unsigned Val, unsigned Sum) {
errs() << " " << Val << " " << Desc << " responses ("
<< Val*100/Sum << "%)\n";
}
~AliasAnalysisCounter() override {
unsigned AASum = No+May+Partial+Must;
unsigned MRSum = NoMR+JustRef+JustMod+MR;
if (AASum + MRSum) { // Print a report if any counted queries occurred...
errs() << "\n===== Alias Analysis Counter Report =====\n"
<< " Analysis counted:\n"
<< " " << AASum << " Total Alias Queries Performed\n";
if (AASum) {
printLine("no alias", No, AASum);
printLine("may alias", May, AASum);
printLine("partial alias", Partial, AASum);
printLine("must alias", Must, AASum);
errs() << " Alias Analysis Counter Summary: " << No*100/AASum << "%/"
<< May*100/AASum << "%/"
<< Partial*100/AASum << "%/"
<< Must*100/AASum<<"%\n\n";
}
errs() << " " << MRSum << " Total Mod/Ref Queries Performed\n";
if (MRSum) {
printLine("no mod/ref", NoMR, MRSum);
printLine("ref", JustRef, MRSum);
printLine("mod", JustMod, MRSum);
printLine("mod/ref", MR, MRSum);
errs() << " Mod/Ref Analysis Counter Summary: " <<NoMR*100/MRSum
<< "%/" << JustRef*100/MRSum << "%/" << JustMod*100/MRSum
<< "%/" << MR*100/MRSum <<"%\n\n";
}
}
}
bool runOnModule(Module &M) override {
this->M = &M;
InitializeAliasAnalysis(this, &M.getDataLayout());
return false;
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AliasAnalysis::getAnalysisUsage(AU);
AU.addRequired<AliasAnalysis>();
AU.setPreservesAll();
}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
void *getAdjustedAnalysisPointer(AnalysisID PI) override {
if (PI == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
// FIXME: We could count these too...
bool pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) override {
return getAnalysis<AliasAnalysis>().pointsToConstantMemory(Loc, OrLocal);
}
// Forwarding functions: just delegate to a real AA implementation, counting
// the number of responses...
AliasResult alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) override;
ModRefResult getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) override;
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return AliasAnalysis::getModRefInfo(CS1,CS2);
}
};
}
char AliasAnalysisCounter::ID = 0;
INITIALIZE_AG_PASS(AliasAnalysisCounter, AliasAnalysis, "count-aa",
"Count Alias Analysis Query Responses", false, true, false)
ModulePass *llvm::createAliasAnalysisCounterPass() {
return new AliasAnalysisCounter();
}
AliasResult AliasAnalysisCounter::alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
const char *AliasString = nullptr;
switch (R) {
case NoAlias: No++; AliasString = "No alias"; break;
case MayAlias: May++; AliasString = "May alias"; break;
case PartialAlias: Partial++; AliasString = "Partial alias"; break;
case MustAlias: Must++; AliasString = "Must alias"; break;
}
if (PrintAll || (PrintAllFailures && R == MayAlias)) {
errs() << AliasString << ":\t";
errs() << "[" << LocA.Size << "B] ";
LocA.Ptr->printAsOperand(errs(), true, M);
errs() << ", ";
errs() << "[" << LocB.Size << "B] ";
LocB.Ptr->printAsOperand(errs(), true, M);
errs() << "\n";
}
return R;
}
AliasAnalysis::ModRefResult
AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
const char *MRString = nullptr;
switch (R) {
case NoModRef: NoMR++; MRString = "NoModRef"; break;
case Ref: JustRef++; MRString = "JustRef"; break;
case Mod: JustMod++; MRString = "JustMod"; break;
case ModRef: MR++; MRString = "ModRef"; break;
}
if (PrintAll || (PrintAllFailures && R == ModRef)) {
errs() << MRString << ": Ptr: ";
errs() << "[" << Loc.Size << "B] ";
Loc.Ptr->printAsOperand(errs(), true, M);
errs() << "\t<->" << *CS.getInstruction() << '\n';
}
return R;
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/IVUsers.cpp | //===- IVUsers.cpp - Induction Variable Users -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements bookkeeping for "interesting" users of expressions
// computed from induction variables.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace llvm;
#define DEBUG_TYPE "iv-users"
char IVUsers::ID = 0;
INITIALIZE_PASS_BEGIN(IVUsers, "iv-users",
"Induction Variable Users", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(IVUsers, "iv-users",
"Induction Variable Users", false, true)
Pass *llvm::createIVUsersPass() {
return new IVUsers();
}
/// isInteresting - Test whether the given expression is "interesting" when
/// used by the given expression, within the context of analyzing the
/// given loop.
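/// For example (illustrative), inside a loop %L the affine addrec
/// {0,+,4}<%L> is interesting, while a plain loop-invariant expression is
/// not: it falls through every case below and returns false.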
static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
ScalarEvolution *SE, LoopInfo *LI) {
// An addrec is interesting if it's affine or if it has an interesting start.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Keep things simple. Don't touch loop-variant strides unless they're
// only used outside the loop and we can simplify them.
if (AR->getLoop() == L)
return AR->isAffine() ||
(!L->contains(I) &&
SE->getSCEVAtScope(AR, LI->getLoopFor(I->getParent())) != AR);
// Otherwise recurse to see if the start value is interesting, and that
// the step value is not interesting, since we don't yet know how to
// do effective SCEV expansions for addrecs with interesting steps.
return isInteresting(AR->getStart(), I, L, SE, LI) &&
!isInteresting(AR->getStepRecurrence(*SE), I, L, SE, LI);
}
// An add is interesting if exactly one of its operands is interesting.
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
bool AnyInterestingYet = false;
for (SCEVAddExpr::op_iterator OI = Add->op_begin(), OE = Add->op_end();
OI != OE; ++OI)
if (isInteresting(*OI, I, L, SE, LI)) {
if (AnyInterestingYet)
return false;
AnyInterestingYet = true;
}
return AnyInterestingYet;
}
// Nothing else is interesting here.
return false;
}
/// Return true if all loop headers that dominate this block are in simplified
/// form.
static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
const LoopInfo *LI,
SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
Loop *NearestLoop = nullptr;
for (DomTreeNode *Rung = DT->getNode(BB);
Rung; Rung = Rung->getIDom()) {
BasicBlock *DomBB = Rung->getBlock();
Loop *DomLoop = LI->getLoopFor(DomBB);
if (DomLoop && DomLoop->getHeader() == DomBB) {
// If the domtree walk reaches a loop with no preheader, return false.
if (!DomLoop->isLoopSimplifyForm())
return false;
// If we have already checked this loop nest, stop checking.
if (SimpleLoopNests.count(DomLoop))
break;
// If we have not already checked this loop nest, remember the loop
// header nearest to BB. The nearest loop may not contain BB.
if (!NearestLoop)
NearestLoop = DomLoop;
}
}
if (NearestLoop)
SimpleLoopNests.insert(NearestLoop);
return true;
}
/// AddUsersImpl - Inspect the specified instruction. If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
bool IVUsers::AddUsersImpl(Instruction *I,
SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
const DataLayout &DL = I->getModule()->getDataLayout();
// Add this IV user to the Processed set before returning false to ensure that
// all IV users are members of the set. See IVUsers::isIVUserOrOperand.
if (!Processed.insert(I).second)
return true; // Instruction already handled.
if (!SE->isSCEVable(I->getType()))
return false; // Void and FP expressions cannot be reduced.
// IVUsers is used by LSR which assumes that all SCEV expressions are safe to
// pass to SCEVExpander. Expressions are not safe to expand if they represent
// operations that are not safe to speculate, namely integer division.
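  // (For instance, a udiv whose divisor is not provably non-zero must not be
  // speculated; expanding such an expression could introduce a trap.)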
if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I))
return false;
// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
uint64_t Width = SE->getTypeSizeInBits(I->getType());
if (Width > 64 || !DL.isLegalInteger(Width))
return false;
// Don't attempt to promote ephemeral values to indvars. They will be removed
// later anyway.
if (EphValues.count(I))
return false;
// Get the symbolic expression for this instruction.
const SCEV *ISE = SE->getSCEV(I);
// If we've come to an uninteresting expression, stop the traversal and
// call this a user.
if (!isInteresting(ISE, I, L, SE, LI))
return false;
SmallPtrSet<Instruction *, 4> UniqueUsers;
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
if (!UniqueUsers.insert(User).second)
continue;
// Do not infinitely recurse on PHI nodes.
if (isa<PHINode>(User) && Processed.count(User))
continue;
// Only consider IVUsers that are dominated by simplified loop
// headers. Otherwise, SCEVExpander will crash.
BasicBlock *UseBB = User->getParent();
// A phi's use is live out of its predecessor block.
if (PHINode *PHI = dyn_cast<PHINode>(User)) {
unsigned OperandNo = U.getOperandNo();
unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
UseBB = PHI->getIncomingBlock(ValNo);
}
if (!isSimplifiedLoopNest(UseBB, DT, LI, SimpleLoopNests))
return false;
// Descend recursively, but not into PHI nodes outside the current loop.
// It's important to see the entire expression outside the loop to get
// choices that depend on addressing mode use right, although we won't
// consider references outside the loop in all cases.
// If User is already in Processed, we don't want to recurse into it again,
// but do want to record a second reference in the same instruction.
bool AddUserToIVUsers = false;
if (LI->getLoopFor(User->getParent()) != L) {
if (isa<PHINode>(User) || Processed.count(User) ||
!AddUsersImpl(User, SimpleLoopNests)) {
DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n'
<< " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
}
} else if (Processed.count(User) || !AddUsersImpl(User, SimpleLoopNests)) {
DEBUG(dbgs() << "FOUND USER: " << *User << '\n'
<< " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
}
if (AddUserToIVUsers) {
// Okay, we found a user that we cannot reduce.
IVStrideUse &NewUse = AddUser(User, I);
// Autodetect the post-inc loop set, populating NewUse.PostIncLoops.
// The regular return value here is discarded; instead of recording
// it, we just recompute it when we need it.
const SCEV *OriginalISE = ISE;
ISE = TransformForPostIncUse(NormalizeAutodetect,
ISE, User, I,
NewUse.PostIncLoops,
*SE, *DT);
// PostIncNormalization effectively simplifies the expression under
// pre-increment assumptions. Those assumptions (no wrapping) might not
// hold for the post-inc value. Catch such cases by making sure the
// transformation is invertible.
if (OriginalISE != ISE) {
const SCEV *DenormalizedISE =
TransformForPostIncUse(Denormalize, ISE, User, I,
NewUse.PostIncLoops, *SE, *DT);
// If we normalized the expression, but denormalization doesn't give the
// original one, discard this user.
if (OriginalISE != DenormalizedISE) {
DEBUG(dbgs() << " DISCARDING (NORMALIZATION ISN'T INVERTIBLE): "
<< *ISE << '\n');
IVUses.pop_back();
return false;
}
}
DEBUG(if (SE->getSCEV(I) != ISE)
dbgs() << " NORMALIZED TO: " << *ISE << '\n');
}
}
return true;
}
bool IVUsers::AddUsersIfInteresting(Instruction *I) {
// SCEVExpander can only handle users that are dominated by simplified loop
// entries. Keep track of all loops that are only dominated by other simple
// loops so we don't traverse the domtree for each user.
SmallPtrSet<Loop*,16> SimpleLoopNests;
return AddUsersImpl(I, SimpleLoopNests);
}
IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
IVUses.push_back(new IVStrideUse(this, User, Operand));
return IVUses.back();
}
IVUsers::IVUsers()
: LoopPass(ID) {
initializeIVUsersPass(*PassRegistry::getPassRegistry());
}
void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AssumptionCacheTracker>();
AU.addRequired<LoopInfoWrapperPass>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<ScalarEvolution>();
AU.setPreservesAll();
}
bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
L = l;
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
*L->getHeader()->getParent());
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
// Collect ephemeral values so that AddUsersIfInteresting skips them.
EphValues.clear();
CodeMetrics::collectEphemeralValues(L, AC, EphValues);
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
// this loop. If they are induction variables, inspect their uses.
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
(void)AddUsersIfInteresting(I);
return false;
}
void IVUsers::print(raw_ostream &OS, const Module *M) const {
OS << "IV Users for loop ";
L->getHeader()->printAsOperand(OS, false);
if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
OS << " with backedge-taken count "
<< *SE->getBackedgeTakenCount(L);
}
OS << ":\n";
for (ilist<IVStrideUse>::const_iterator UI = IVUses.begin(),
E = IVUses.end(); UI != E; ++UI) {
OS << " ";
UI->getOperandValToReplace()->printAsOperand(OS, false);
OS << " = " << *getReplacementExpr(*UI);
for (PostIncLoopSet::const_iterator
I = UI->PostIncLoops.begin(),
E = UI->PostIncLoops.end(); I != E; ++I) {
OS << " (post-inc with loop ";
(*I)->getHeader()->printAsOperand(OS, false);
OS << ")";
}
OS << " in ";
if (UI->getUser())
UI->getUser()->print(OS);
else
OS << "Printing <null> User";
OS << '\n';
}
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void IVUsers::dump() const {
print(dbgs());
}
#endif
void IVUsers::releaseMemory() {
Processed.clear();
IVUses.clear();
}
/// getReplacementExpr - Return a SCEV expression which computes the
/// value of the OperandValToReplace.
const SCEV *IVUsers::getReplacementExpr(const IVStrideUse &IU) const {
return SE->getSCEV(IU.getOperandValToReplace());
}
/// getExpr - Return the expression for the use.
const SCEV *IVUsers::getExpr(const IVStrideUse &IU) const {
return
TransformForPostIncUse(Normalize, getReplacementExpr(IU),
IU.getUser(), IU.getOperandValToReplace(),
const_cast<PostIncLoopSet &>(IU.getPostIncLoops()),
*SE, *DT);
}
static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
if (AR->getLoop() == L)
return AR;
return findAddRecForLoop(AR->getStart(), L);
}
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
I != E; ++I)
if (const SCEVAddRecExpr *AR = findAddRecForLoop(*I, L))
return AR;
return nullptr;
}
return nullptr;
}
const SCEV *IVUsers::getStride(const IVStrideUse &IU, const Loop *L) const {
if (const SCEVAddRecExpr *AR = findAddRecForLoop(getExpr(IU), L))
return AR->getStepRecurrence(*SE);
return nullptr;
}
void IVStrideUse::transformToPostInc(const Loop *L) {
PostIncLoops.insert(L);
}
void IVStrideUse::deleted() {
// Remove this user from the list.
Parent->Processed.erase(this->getUser());
Parent->IVUses.erase(this);
// this now dangles!
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/LazyCallGraph.cpp | //===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "lcg"
static void findCallees(
SmallVectorImpl<Constant *> &Worklist, SmallPtrSetImpl<Constant *> &Visited,
SmallVectorImpl<PointerUnion<Function *, LazyCallGraph::Node *>> &Callees,
DenseMap<Function *, size_t> &CalleeIndexMap) {
while (!Worklist.empty()) {
Constant *C = Worklist.pop_back_val();
if (Function *F = dyn_cast<Function>(C)) {
// Note that we consider *any* function with a definition to be a viable
// edge. Even if the function's definition is subject to replacement by
// some other module (say, a weak definition) there may still be
// optimizations which essentially speculate based on the definition and
// a way to check that the specific definition is in fact the one being
// used. For example, this could be done by moving the weak definition to
// a strong (internal) definition and making the weak definition be an
// alias. Then a test of the address of the weak function against the new
// strong definition's address would be an effective way to determine the
// safety of optimizing a direct call edge.
if (!F->isDeclaration() &&
CalleeIndexMap.insert(std::make_pair(F, Callees.size())).second) {
DEBUG(dbgs() << " Added callable function: " << F->getName()
<< "\n");
Callees.push_back(F);
}
continue;
}
for (Value *Op : C->operand_values())
if (Visited.insert(cast<Constant>(Op)).second)
Worklist.push_back(cast<Constant>(Op));
}
}
LazyCallGraph::Node::Node(LazyCallGraph &G, Function &F)
: G(&G), F(F), DFSNumber(0), LowLink(0) {
DEBUG(dbgs() << " Adding functions called by '" << F.getName()
<< "' to the graph.\n");
SmallVector<Constant *, 16> Worklist;
SmallPtrSet<Constant *, 16> Visited;
// Find all the potential callees in this function. First walk the
// instructions and add every operand which is a constant to the worklist.
for (BasicBlock &BB : F)
for (Instruction &I : BB)
for (Value *Op : I.operand_values())
if (Constant *C = dyn_cast<Constant>(Op))
if (Visited.insert(C).second)
Worklist.push_back(C);
// We've collected all the constant (and thus potentially function or
// function containing) operands to all of the instructions in the function.
// Process them (recursively) collecting every function found.
findCallees(Worklist, Visited, Callees, CalleeIndexMap);
}
void LazyCallGraph::Node::insertEdgeInternal(Function &Callee) {
if (Node *N = G->lookup(Callee))
return insertEdgeInternal(*N);
CalleeIndexMap.insert(std::make_pair(&Callee, Callees.size()));
Callees.push_back(&Callee);
}
void LazyCallGraph::Node::insertEdgeInternal(Node &CalleeN) {
CalleeIndexMap.insert(std::make_pair(&CalleeN.getFunction(), Callees.size()));
Callees.push_back(&CalleeN);
}
void LazyCallGraph::Node::removeEdgeInternal(Function &Callee) {
auto IndexMapI = CalleeIndexMap.find(&Callee);
assert(IndexMapI != CalleeIndexMap.end() &&
"Callee not in the callee set for this caller?");
Callees[IndexMapI->second] = nullptr;
CalleeIndexMap.erase(IndexMapI);
}
LazyCallGraph::LazyCallGraph(Module &M) : NextDFSNumber(0) {
DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
<< "\n");
for (Function &F : M)
if (!F.isDeclaration() && !F.hasLocalLinkage())
if (EntryIndexMap.insert(std::make_pair(&F, EntryNodes.size())).second) {
DEBUG(dbgs() << " Adding '" << F.getName()
<< "' to entry set of the graph.\n");
EntryNodes.push_back(&F);
}
// Now add entry nodes for functions reachable via initializers to globals.
SmallVector<Constant *, 16> Worklist;
SmallPtrSet<Constant *, 16> Visited;
for (GlobalVariable &GV : M.globals())
if (GV.hasInitializer())
if (Visited.insert(GV.getInitializer()).second)
Worklist.push_back(GV.getInitializer());
DEBUG(dbgs() << " Adding functions referenced by global initializers to the "
"entry set.\n");
findCallees(Worklist, Visited, EntryNodes, EntryIndexMap);
for (auto &Entry : EntryNodes) {
assert(!Entry.isNull() &&
"We can't have removed edges before we finish the constructor!");
if (Function *F = Entry.dyn_cast<Function *>())
SCCEntryNodes.push_back(F);
else
SCCEntryNodes.push_back(&Entry.get<Node *>()->getFunction());
}
}
LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
: BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
EntryNodes(std::move(G.EntryNodes)),
EntryIndexMap(std::move(G.EntryIndexMap)), SCCBPA(std::move(G.SCCBPA)),
SCCMap(std::move(G.SCCMap)), LeafSCCs(std::move(G.LeafSCCs)),
DFSStack(std::move(G.DFSStack)),
SCCEntryNodes(std::move(G.SCCEntryNodes)),
NextDFSNumber(G.NextDFSNumber) {
updateGraphPtrs();
}
LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
BPA = std::move(G.BPA);
NodeMap = std::move(G.NodeMap);
EntryNodes = std::move(G.EntryNodes);
EntryIndexMap = std::move(G.EntryIndexMap);
SCCBPA = std::move(G.SCCBPA);
SCCMap = std::move(G.SCCMap);
LeafSCCs = std::move(G.LeafSCCs);
DFSStack = std::move(G.DFSStack);
SCCEntryNodes = std::move(G.SCCEntryNodes);
NextDFSNumber = G.NextDFSNumber;
updateGraphPtrs();
return *this;
}
void LazyCallGraph::SCC::insert(Node &N) {
N.DFSNumber = N.LowLink = -1;
Nodes.push_back(&N);
G->SCCMap[&N] = this;
}
bool LazyCallGraph::SCC::isDescendantOf(const SCC &C) const {
// Walk up the parents of this SCC and verify that we eventually find C.
SmallVector<const SCC *, 4> AncestorWorklist;
AncestorWorklist.push_back(this);
do {
const SCC *AncestorC = AncestorWorklist.pop_back_val();
if (AncestorC->isChildOf(C))
return true;
for (const SCC *ParentC : AncestorC->ParentSCCs)
AncestorWorklist.push_back(ParentC);
} while (!AncestorWorklist.empty());
return false;
}
void LazyCallGraph::SCC::insertIntraSCCEdge(Node &CallerN, Node &CalleeN) {
// First insert it into the caller.
CallerN.insertEdgeInternal(CalleeN);
assert(G->SCCMap.lookup(&CallerN) == this && "Caller must be in this SCC.");
assert(G->SCCMap.lookup(&CalleeN) == this && "Callee must be in this SCC.");
// Nothing changes about this SCC or any other.
}
void LazyCallGraph::SCC::insertOutgoingEdge(Node &CallerN, Node &CalleeN) {
// First insert it into the caller.
CallerN.insertEdgeInternal(CalleeN);
assert(G->SCCMap.lookup(&CallerN) == this && "Caller must be in this SCC.");
SCC &CalleeC = *G->SCCMap.lookup(&CalleeN);
assert(&CalleeC != this && "Callee must not be in this SCC.");
assert(CalleeC.isDescendantOf(*this) &&
"Callee must be a descendant of the Caller.");
// The only change required is to add this SCC to the parent set of the callee.
CalleeC.ParentSCCs.insert(this);
}
SmallVector<LazyCallGraph::SCC *, 1>
LazyCallGraph::SCC::insertIncomingEdge(Node &CallerN, Node &CalleeN) {
// First insert it into the caller.
CallerN.insertEdgeInternal(CalleeN);
assert(G->SCCMap.lookup(&CalleeN) == this && "Callee must be in this SCC.");
SCC &CallerC = *G->SCCMap.lookup(&CallerN);
assert(&CallerC != this && "Caller must not be in this SCC.");
assert(CallerC.isDescendantOf(*this) &&
"Caller must be a descendant of the Callee.");
// The algorithm we use for merging SCCs based on the cycle introduced here
// is to walk the SCC inverted DAG formed by the parent SCC sets. The inverse
// graph has the same cycle properties as the actual DAG of the SCCs, and
// when forming SCCs lazily by a DFS, the bottom of the graph won't exist in
// many cases which should prune the search space.
//
// FIXME: We can get this pruning behavior even after the incremental SCC
// formation by leaving behind (conservative) DFS numberings in the nodes,
// and pruning the search with them. These would need to be cleverly updated
// during the removal of intra-SCC edges, but could be preserved
// conservatively.
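  // For example (illustrative): given a call-graph path A -> B -> C, where A
  // is this callee's SCC and the caller lives in C, the new caller->callee
  // edge closes a cycle, and the walk below merges A, B, and C into one SCC.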
// The set of SCCs that are connected to the caller, and thus will
// participate in the merged connected component.
SmallPtrSet<SCC *, 8> ConnectedSCCs;
ConnectedSCCs.insert(this);
ConnectedSCCs.insert(&CallerC);
// We build up a DFS stack of the parents chains.
SmallVector<std::pair<SCC *, SCC::parent_iterator>, 8> DFSSCCs;
SmallPtrSet<SCC *, 8> VisitedSCCs;
int ConnectedDepth = -1;
SCC *C = this;
parent_iterator I = parent_begin(), E = parent_end();
for (;;) {
while (I != E) {
SCC &ParentSCC = *I++;
// If we have already processed this parent SCC, skip it, and remember
// whether it was connected so we don't have to check the rest of the
// stack. This also handles when we reach a child of the 'this' SCC (the
// callee) which terminates the search.
if (ConnectedSCCs.count(&ParentSCC)) {
ConnectedDepth = std::max<int>(ConnectedDepth, DFSSCCs.size());
continue;
}
if (VisitedSCCs.count(&ParentSCC))
continue;
// We fully explore the depth-first space, adding nodes to the connected
// set only as we pop them off, so "recurse" by rotating to the parent.
DFSSCCs.push_back(std::make_pair(C, I));
C = &ParentSCC;
I = ParentSCC.parent_begin();
E = ParentSCC.parent_end();
}
// If we've found a connection anywhere below this point on the stack (and
// thus up the parent graph from the caller), the current node needs to be
// added to the connected set now that we've processed all of its parents.
if ((int)DFSSCCs.size() == ConnectedDepth) {
--ConnectedDepth; // We're finished with this connection.
ConnectedSCCs.insert(C);
} else {
// Otherwise remember that its parents don't ever connect.
assert(ConnectedDepth < (int)DFSSCCs.size() &&
"Cannot have a connected depth greater than the DFS depth!");
VisitedSCCs.insert(C);
}
if (DFSSCCs.empty())
break; // We've walked all the parents of the caller transitively.
// Pop off the prior node and position to unwind the depth first recursion.
std::tie(C, I) = DFSSCCs.pop_back_val();
E = C->parent_end();
}
// Now that we have identified all of the SCCs which need to be merged into
// a connected set with the inserted edge, merge all of them into this SCC.
// FIXME: This operation currently creates ordering stability problems
// because we don't use stably ordered containers for the parent SCCs or the
// connected SCCs.
unsigned NewNodeBeginIdx = Nodes.size();
for (SCC *C : ConnectedSCCs) {
if (C == this)
continue;
for (SCC *ParentC : C->ParentSCCs)
if (!ConnectedSCCs.count(ParentC))
ParentSCCs.insert(ParentC);
C->ParentSCCs.clear();
for (Node *N : *C) {
for (Node &ChildN : *N) {
SCC &ChildC = *G->SCCMap.lookup(&ChildN);
if (&ChildC != C)
ChildC.ParentSCCs.erase(C);
}
G->SCCMap[N] = this;
Nodes.push_back(N);
}
C->Nodes.clear();
}
for (auto I = Nodes.begin() + NewNodeBeginIdx, E = Nodes.end(); I != E; ++I)
for (Node &ChildN : **I) {
SCC &ChildC = *G->SCCMap.lookup(&ChildN);
if (&ChildC != this)
ChildC.ParentSCCs.insert(this);
}
// We return the list of SCCs which were merged so that callers can
// invalidate any data they have associated with those SCCs. Note that these
// SCCs are no longer in an interesting state (they are totally empty) but
// the pointers will remain stable for the life of the graph itself.
return SmallVector<SCC *, 1>(ConnectedSCCs.begin(), ConnectedSCCs.end());
}
void LazyCallGraph::SCC::removeInterSCCEdge(Node &CallerN, Node &CalleeN) {
// First remove it from the node.
CallerN.removeEdgeInternal(CalleeN.getFunction());
assert(G->SCCMap.lookup(&CallerN) == this &&
"The caller must be a member of this SCC.");
SCC &CalleeC = *G->SCCMap.lookup(&CalleeN);
  assert(&CalleeC != this &&
         "This API only supports the removal of inter-SCC edges.");
assert(std::find(G->LeafSCCs.begin(), G->LeafSCCs.end(), this) ==
G->LeafSCCs.end() &&
"Cannot have a leaf SCC caller with a different SCC callee.");
bool HasOtherCallToCalleeC = false;
bool HasOtherCallOutsideSCC = false;
for (Node *N : *this) {
for (Node &OtherCalleeN : *N) {
SCC &OtherCalleeC = *G->SCCMap.lookup(&OtherCalleeN);
if (&OtherCalleeC == &CalleeC) {
HasOtherCallToCalleeC = true;
break;
}
if (&OtherCalleeC != this)
HasOtherCallOutsideSCC = true;
}
if (HasOtherCallToCalleeC)
break;
}
// Because the SCCs form a DAG, deleting such an edge cannot change the set
// of SCCs in the graph. However, it may cut an edge of the SCC DAG, making
// the caller no longer a parent of the callee. Walk the other call edges
// in the caller to tell.
if (!HasOtherCallToCalleeC) {
bool Removed = CalleeC.ParentSCCs.erase(this);
(void)Removed;
assert(Removed &&
"Did not find the caller SCC in the callee SCC's parent list!");
// It may orphan an SCC if it is the last edge reaching it, but that does
// not violate any invariants of the graph.
if (CalleeC.ParentSCCs.empty())
DEBUG(dbgs() << "LCG: Update removing " << CallerN.getFunction().getName()
<< " -> " << CalleeN.getFunction().getName()
<< " edge orphaned the callee's SCC!\n");
}
// It may make the Caller SCC a leaf SCC.
if (!HasOtherCallOutsideSCC)
G->LeafSCCs.push_back(this);
}
void LazyCallGraph::SCC::internalDFS(
SmallVectorImpl<std::pair<Node *, Node::iterator>> &DFSStack,
SmallVectorImpl<Node *> &PendingSCCStack, Node *N,
SmallVectorImpl<SCC *> &ResultSCCs) {
Node::iterator I = N->begin();
N->LowLink = N->DFSNumber = 1;
int NextDFSNumber = 2;
for (;;) {
assert(N->DFSNumber != 0 && "We should always assign a DFS number "
"before processing a node.");
// We simulate recursion by popping out of the nested loop and continuing.
Node::iterator E = N->end();
while (I != E) {
Node &ChildN = *I;
if (SCC *ChildSCC = G->SCCMap.lookup(&ChildN)) {
// Check if we have reached a node in the new (known connected) set of
// this SCC. If so, the entire stack is necessarily in that set and we
// can re-start.
if (ChildSCC == this) {
insert(*N);
while (!PendingSCCStack.empty())
insert(*PendingSCCStack.pop_back_val());
while (!DFSStack.empty())
insert(*DFSStack.pop_back_val().first);
return;
}
// If this child isn't currently in this SCC, no need to process it.
// However, we do need to remove this SCC from its SCC's parent set.
ChildSCC->ParentSCCs.erase(this);
++I;
continue;
}
if (ChildN.DFSNumber == 0) {
// Mark that we should start at this child when next this node is the
// top of the stack. We don't start at the next child to ensure this
// child's lowlink is reflected.
DFSStack.push_back(std::make_pair(N, I));
// Continue, resetting to the child node.
ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
N = &ChildN;
I = ChildN.begin();
E = ChildN.end();
continue;
}
// Track the lowest link of the children, if any are still in the stack.
// Any child not on the stack will have a LowLink of -1.
assert(ChildN.LowLink != 0 &&
"Low-link must not be zero with a non-zero DFS number.");
if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
N->LowLink = ChildN.LowLink;
++I;
}
if (N->LowLink == N->DFSNumber) {
ResultSCCs.push_back(G->formSCC(N, PendingSCCStack));
if (DFSStack.empty())
return;
} else {
// At this point we know that N cannot ever be an SCC root. Its low-link
// is not its dfs-number, and we've processed all of its children. It is
// just sitting here waiting until some node further down the stack gets
// low-link == dfs-number and pops it off as well. Move it to the pending
// stack which is pulled into the next SCC to be formed.
PendingSCCStack.push_back(N);
assert(!DFSStack.empty() && "We shouldn't have an empty stack!");
}
N = DFSStack.back().first;
I = DFSStack.back().second;
DFSStack.pop_back();
}
}
SmallVector<LazyCallGraph::SCC *, 1>
LazyCallGraph::SCC::removeIntraSCCEdge(Node &CallerN,
Node &CalleeN) {
// First remove it from the node.
CallerN.removeEdgeInternal(CalleeN.getFunction());
// We return a list of the resulting *new* SCCs in postorder.
SmallVector<SCC *, 1> ResultSCCs;
// Direct recursion doesn't impact the SCC graph at all.
if (&CallerN == &CalleeN)
return ResultSCCs;
// The worklist is every node in the original SCC.
SmallVector<Node *, 1> Worklist;
Worklist.swap(Nodes);
for (Node *N : Worklist) {
// The nodes formerly in this SCC are no longer in any SCC.
N->DFSNumber = 0;
N->LowLink = 0;
G->SCCMap.erase(N);
}
assert(Worklist.size() > 1 && "We have to have at least two nodes to have an "
"edge between them that is within the SCC.");
// The callee can already reach every node in this SCC (by definition). It is
// the only node we know will stay inside this SCC. Everything which
// transitively reaches Callee will also remain in the SCC. To model this we
// incrementally add any chain of nodes which reaches something in the new
// node set to the new node set. This short circuits one side of the Tarjan's
// walk.
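  //
  // Worked example (hypothetical SCC {A, B, C} with edges A->B, B->C, C->A):
  // removing A->B breaks the cycle. Nothing reaches the callee B any more, so
  // this SCC shrinks to {B}, and the walk below splits {A} and {C} off as new
  // SCCs, returned in postorder.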
insert(CalleeN);
// We're going to do a full mini-Tarjan's walk using a local stack here.
SmallVector<std::pair<Node *, Node::iterator>, 4> DFSStack;
SmallVector<Node *, 4> PendingSCCStack;
do {
Node *N = Worklist.pop_back_val();
if (N->DFSNumber == 0)
internalDFS(DFSStack, PendingSCCStack, N, ResultSCCs);
assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
assert(PendingSCCStack.empty() && "Didn't flush all pending SCC nodes!");
} while (!Worklist.empty());
// Now we need to reconnect the current SCC to the graph.
bool IsLeafSCC = true;
for (Node *N : Nodes) {
for (Node &ChildN : *N) {
SCC &ChildSCC = *G->SCCMap.lookup(&ChildN);
if (&ChildSCC == this)
continue;
ChildSCC.ParentSCCs.insert(this);
IsLeafSCC = false;
}
}
#ifndef NDEBUG
if (!ResultSCCs.empty())
assert(!IsLeafSCC && "This SCC cannot be a leaf as we have split out new "
"SCCs by removing this edge.");
if (!std::any_of(G->LeafSCCs.begin(), G->LeafSCCs.end(),
[&](SCC *C) { return C == this; }))
assert(!IsLeafSCC && "This SCC cannot be a leaf as it already had child "
"SCCs before we removed this edge.");
#endif
// If this SCC stopped being a leaf through this edge removal, remove it from
// the leaf SCC list.
if (!IsLeafSCC && !ResultSCCs.empty())
G->LeafSCCs.erase(std::remove(G->LeafSCCs.begin(), G->LeafSCCs.end(), this),
G->LeafSCCs.end());
// Return the new list of SCCs.
return ResultSCCs;
}
void LazyCallGraph::insertEdge(Node &CallerN, Function &Callee) {
assert(SCCMap.empty() && DFSStack.empty() &&
"This method cannot be called after SCCs have been formed!");
return CallerN.insertEdgeInternal(Callee);
}
void LazyCallGraph::removeEdge(Node &CallerN, Function &Callee) {
assert(SCCMap.empty() && DFSStack.empty() &&
"This method cannot be called after SCCs have been formed!");
return CallerN.removeEdgeInternal(Callee);
}
LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
return *new (MappedN = BPA.Allocate()) Node(*this, F);
}
void LazyCallGraph::updateGraphPtrs() {
// Process all nodes updating the graph pointers.
{
SmallVector<Node *, 16> Worklist;
for (auto &Entry : EntryNodes)
if (Node *EntryN = Entry.dyn_cast<Node *>())
Worklist.push_back(EntryN);
while (!Worklist.empty()) {
Node *N = Worklist.pop_back_val();
N->G = this;
for (auto &Callee : N->Callees)
if (!Callee.isNull())
if (Node *CalleeN = Callee.dyn_cast<Node *>())
Worklist.push_back(CalleeN);
}
}
// Process all SCCs updating the graph pointers.
{
SmallVector<SCC *, 16> Worklist(LeafSCCs.begin(), LeafSCCs.end());
while (!Worklist.empty()) {
SCC *C = Worklist.pop_back_val();
C->G = this;
Worklist.insert(Worklist.end(), C->ParentSCCs.begin(),
C->ParentSCCs.end());
}
}
}
LazyCallGraph::SCC *LazyCallGraph::formSCC(Node *RootN,
SmallVectorImpl<Node *> &NodeStack) {
// The tail of the stack is the new SCC. Allocate the SCC and pop the stack
// into it.
SCC *NewSCC = new (SCCBPA.Allocate()) SCC(*this);
while (!NodeStack.empty() && NodeStack.back()->DFSNumber > RootN->DFSNumber) {
assert(NodeStack.back()->LowLink >= RootN->LowLink &&
"We cannot have a low link in an SCC lower than its root on the "
"stack!");
NewSCC->insert(*NodeStack.pop_back_val());
}
NewSCC->insert(*RootN);
// A final pass over all edges in the SCC (this remains linear as we only
// do this once when we build the SCC) to connect it to the parent sets of
// its children.
bool IsLeafSCC = true;
for (Node *SCCN : NewSCC->Nodes)
for (Node &SCCChildN : *SCCN) {
SCC &ChildSCC = *SCCMap.lookup(&SCCChildN);
if (&ChildSCC == NewSCC)
continue;
ChildSCC.ParentSCCs.insert(NewSCC);
IsLeafSCC = false;
}
  // For the SCCs where we find no child SCCs, add them to the leaf list.
if (IsLeafSCC)
LeafSCCs.push_back(NewSCC);
return NewSCC;
}
LazyCallGraph::SCC *LazyCallGraph::getNextSCCInPostOrder() {
Node *N;
Node::iterator I;
if (!DFSStack.empty()) {
N = DFSStack.back().first;
I = DFSStack.back().second;
DFSStack.pop_back();
} else {
// If we've handled all candidate entry nodes to the SCC forest, we're done.
do {
if (SCCEntryNodes.empty())
return nullptr;
N = &get(*SCCEntryNodes.pop_back_val());
} while (N->DFSNumber != 0);
I = N->begin();
N->LowLink = N->DFSNumber = 1;
NextDFSNumber = 2;
}
for (;;) {
assert(N->DFSNumber != 0 && "We should always assign a DFS number "
"before placing a node onto the stack.");
Node::iterator E = N->end();
while (I != E) {
Node &ChildN = *I;
if (ChildN.DFSNumber == 0) {
// Mark that we should start at this child when next this node is the
// top of the stack. We don't start at the next child to ensure this
// child's lowlink is reflected.
DFSStack.push_back(std::make_pair(N, N->begin()));
// Recurse onto this node via a tail call.
assert(!SCCMap.count(&ChildN) &&
"Found a node with 0 DFS number but already in an SCC!");
ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
N = &ChildN;
I = ChildN.begin();
E = ChildN.end();
continue;
}
// Track the lowest link of the children, if any are still in the stack.
assert(ChildN.LowLink != 0 &&
"Low-link must not be zero with a non-zero DFS number.");
if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
N->LowLink = ChildN.LowLink;
++I;
}
if (N->LowLink == N->DFSNumber)
// Form the new SCC out of the top of the DFS stack.
return formSCC(N, PendingSCCStack);
// At this point we know that N cannot ever be an SCC root. Its low-link
// is not its dfs-number, and we've processed all of its children. It is
// just sitting here waiting until some node further down the stack gets
// low-link == dfs-number and pops it off as well. Move it to the pending
// stack which is pulled into the next SCC to be formed.
PendingSCCStack.push_back(N);
assert(!DFSStack.empty() && "We never found a viable root!");
N = DFSStack.back().first;
I = DFSStack.back().second;
DFSStack.pop_back();
}
}
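// A minimal usage sketch (hypothetical driver code; 'visit' is illustrative):
//
//   LazyCallGraph G(M);
//   for (LazyCallGraph::SCC &C : G.postorder_sccs())
//     visit(C);
//
// The postorder_sccs() range lazily forms each SCC via the walk above; the
// printer pass below uses the same traversal.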
char LazyCallGraphAnalysis::PassID;
LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
static void printNodes(raw_ostream &OS, LazyCallGraph::Node &N,
SmallPtrSetImpl<LazyCallGraph::Node *> &Printed) {
// Recurse depth first through the nodes.
for (LazyCallGraph::Node &ChildN : N)
if (Printed.insert(&ChildN).second)
printNodes(OS, ChildN, Printed);
OS << " Call edges in function: " << N.getFunction().getName() << "\n";
for (LazyCallGraph::iterator I = N.begin(), E = N.end(); I != E; ++I)
OS << " -> " << I->getFunction().getName() << "\n";
OS << "\n";
}
static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &SCC) {
ptrdiff_t SCCSize = std::distance(SCC.begin(), SCC.end());
OS << " SCC with " << SCCSize << " functions:\n";
for (LazyCallGraph::Node *N : SCC)
OS << " " << N->getFunction().getName() << "\n";
OS << "\n";
}
PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M,
ModuleAnalysisManager *AM) {
LazyCallGraph &G = AM->getResult<LazyCallGraphAnalysis>(M);
OS << "Printing the call graph for module: " << M.getModuleIdentifier()
<< "\n\n";
SmallPtrSet<LazyCallGraph::Node *, 16> Printed;
for (LazyCallGraph::Node &N : G)
if (Printed.insert(&N).second)
printNodes(OS, N, Printed);
for (LazyCallGraph::SCC &SCC : G.postorder_sccs())
printSCC(OS, SCC);
return PreservedAnalyses::all();
}
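// A hedged registration sketch under the new pass manager (the exact pipeline
// setup varies with the surrounding driver code):
//
//   ModulePassManager MPM;
//   MPM.addPass(LazyCallGraphPrinterPass(dbgs()));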
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/Trace.cpp | //===- Trace.cpp - Implementation of Trace class --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class represents a single trace of LLVM basic blocks. A trace is a
// single entry, multiple exit, region of code that is often hot. Trace-based
// optimizations treat traces almost like they are a large, strange, basic
// block: because the trace path is assumed to be hot, optimizations for the
// fall-through path are made at the expense of the non-fall-through paths.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Trace.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
Function *Trace::getFunction() const {
return getEntryBasicBlock()->getParent();
}
Module *Trace::getModule() const {
return getFunction()->getParent();
}
/// print - Write trace to output stream.
///
void Trace::print(raw_ostream &O) const {
Function *F = getFunction();
O << "; Trace from function " << F->getName() << ", blocks:\n";
for (const_iterator i = begin(), e = end(); i != e; ++i) {
O << "; ";
(*i)->printAsOperand(O, true, getModule());
O << "\n";
}
O << "; Trace parent function: \n" << *F;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// dump - Debugger convenience method; writes trace to standard error
/// output stream.
///
void Trace::dump() const {
print(dbgs());
}
#endif
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/CostModel.cpp | //===- CostModel.cpp ------ Cost Model Analysis ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the cost model analysis. It provides a very basic cost
// estimation for LLVM-IR. This analysis uses the services of the codegen
// to approximate the cost of any IR instruction when lowered to machine
// instructions. The cost results are unit-less and the cost number represents
// the throughput of the machine assuming that all loads hit the cache, all
// branches are predicted, etc. The cost numbers can be added in order to
// compare two or more transformation alternatives.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define CM_NAME "cost-model"
#define DEBUG_TYPE CM_NAME
static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
cl::Hidden,
cl::desc("Recognize reduction patterns."));
namespace {
class CostModelAnalysis : public FunctionPass {
public:
static char ID; // Class identification, replacement for typeinfo
CostModelAnalysis() : FunctionPass(ID), F(nullptr), TTI(nullptr) {
initializeCostModelAnalysisPass(
*PassRegistry::getPassRegistry());
}
/// Returns the expected cost of the instruction.
/// Returns -1 if the cost is unknown.
/// Note, this method does not cache the cost calculation and it
/// can be expensive in some cases.
unsigned getInstructionCost(const Instruction *I) const;
private:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
void print(raw_ostream &OS, const Module*) const override;
/// The function that we analyze.
Function *F;
/// Target information.
const TargetTransformInfo *TTI;
};
} // End of anonymous namespace
// Register this pass.
char CostModelAnalysis::ID = 0;
static const char cm_name[] = "Cost Model Analysis";
INITIALIZE_PASS_BEGIN(CostModelAnalysis, CM_NAME, cm_name, false, true)
INITIALIZE_PASS_END (CostModelAnalysis, CM_NAME, cm_name, false, true)
FunctionPass *llvm::createCostModelAnalysisPass() {
return new CostModelAnalysis();
}
void
CostModelAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
}
bool
CostModelAnalysis::runOnFunction(Function &F) {
this->F = &F;
auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
TTI = TTIWP ? &TTIWP->getTTI(F) : nullptr;
return false;
}
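// Matches masks that reverse the first input vector; non-positive mask
// elements are treated as wildcards by the check below.
// Example: shufflevector A, B, <3,2,1,0>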
static bool isReverseVectorMask(SmallVectorImpl<int> &Mask) {
for (unsigned i = 0, MaskSize = Mask.size(); i < MaskSize; ++i)
if (Mask[i] > 0 && Mask[i] != (int)(MaskSize - 1 - i))
return false;
return true;
}
static bool isAlternateVectorMask(SmallVectorImpl<int> &Mask) {
bool isAlternate = true;
unsigned MaskSize = Mask.size();
// Example: shufflevector A, B, <0,5,2,7>
for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
if (Mask[i] < 0)
continue;
isAlternate = Mask[i] == (int)((i & 1) ? MaskSize + i : i);
}
if (isAlternate)
return true;
isAlternate = true;
// Example: shufflevector A, B, <4,1,6,3>
for (unsigned i = 0; i < MaskSize && isAlternate; ++i) {
if (Mask[i] < 0)
continue;
isAlternate = Mask[i] == (int)((i & 1) ? i : MaskSize + i);
}
return isAlternate;
}
static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) {
TargetTransformInfo::OperandValueKind OpInfo =
TargetTransformInfo::OK_AnyValue;
// Check for a splat of a constant or for a non uniform vector of constants.
if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
OpInfo = TargetTransformInfo::OK_NonUniformConstantValue;
if (cast<Constant>(V)->getSplatValue() != nullptr)
OpInfo = TargetTransformInfo::OK_UniformConstantValue;
}
return OpInfo;
}
static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
unsigned Level) {
// We don't need a shuffle if we just want to have element 0 in position 0 of
// the vector.
if (!SI && Level == 0 && IsLeft)
return true;
else if (!SI)
return false;
SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);
// Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
// we look at the left or right side.
for (unsigned i = 0, e = (1 << Level), val = !IsLeft; i != e; ++i, val += 2)
Mask[i] = val;
SmallVector<int, 16> ActualMask = SI->getShuffleMask();
if (Mask != ActualMask)
return false;
return true;
}
static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
unsigned Level, unsigned NumLevels) {
// Match one level of pairwise operations.
// %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
// %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
if (BinOp == nullptr)
return false;
assert(BinOp->getType()->isVectorTy() && "Expecting a vector type");
unsigned Opcode = BinOp->getOpcode();
Value *L = BinOp->getOperand(0);
Value *R = BinOp->getOperand(1);
ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(L);
if (!LS && Level)
return false;
ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(R);
if (!RS && Level)
return false;
// On level 0 we can omit one shufflevector instruction.
if (!Level && !RS && !LS)
return false;
// Shuffle inputs must match.
Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
Value *NextLevelOp = nullptr;
if (NextLevelOpR && NextLevelOpL) {
// If we have two shuffles their operands must match.
if (NextLevelOpL != NextLevelOpR)
return false;
NextLevelOp = NextLevelOpL;
} else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
// On the first level we can omit the shufflevector <0, undef,...>. So the
// input to the other shufflevector <1, undef> must match with one of the
// inputs to the current binary operation.
// Example:
// %NextLevelOpL = shufflevector %R, <1, undef ...>
// %BinOp = fadd %NextLevelOpL, %R
if (NextLevelOpL && NextLevelOpL != R)
return false;
else if (NextLevelOpR && NextLevelOpR != L)
return false;
NextLevelOp = NextLevelOpL ? R : L;
} else
return false;
// Check that the next levels binary operation exists and matches with the
// current one.
BinaryOperator *NextLevelBinOp = nullptr;
if (Level + 1 != NumLevels) {
if (!(NextLevelBinOp = dyn_cast<BinaryOperator>(NextLevelOp)))
return false;
else if (NextLevelBinOp->getOpcode() != Opcode)
return false;
}
// Shuffle mask for pairwise operation must match.
if (matchPairwiseShuffleMask(LS, true, Level)) {
if (!matchPairwiseShuffleMask(RS, false, Level))
return false;
} else if (matchPairwiseShuffleMask(RS, true, Level)) {
if (!matchPairwiseShuffleMask(LS, false, Level))
return false;
} else
return false;
if (++Level == NumLevels)
return true;
// Match next level.
return matchPairwiseReductionAtLevel(NextLevelBinOp, Level, NumLevels);
}
static bool matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
unsigned &Opcode, Type *&Ty) {
if (!EnableReduxCost)
return false;
// Need to extract the first element.
ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
unsigned Idx = ~0u;
if (CI)
Idx = CI->getZExtValue();
if (Idx != 0)
return false;
BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
if (!RdxStart)
return false;
Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
if (!isPowerOf2_32(NumVecElems))
return false;
  // We look for a sequence of shuffle, shuffle, add triples like the following
// that builds a pairwise reduction tree.
//
// (X0, X1, X2, X3)
// (X0 + X1, X2 + X3, undef, undef)
// ((X0 + X1) + (X2 + X3), undef, undef, undef)
//
// %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
// %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
// %rdx.shuf.1.0 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
// <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
// %rdx.shuf.1.1 = shufflevector <4 x float> %bin.rdx.0, <4 x float> undef,
// <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
// %bin.rdx8 = fadd <4 x float> %rdx.shuf.1.0, %rdx.shuf.1.1
// %r = extractelement <4 x float> %bin.rdx8, i32 0
if (!matchPairwiseReductionAtLevel(RdxStart, 0, Log2_32(NumVecElems)))
return false;
Opcode = RdxStart->getOpcode();
Ty = VecTy;
return true;
}
static std::pair<Value *, ShuffleVectorInst *>
getShuffleAndOtherOprd(BinaryOperator *B) {
Value *L = B->getOperand(0);
Value *R = B->getOperand(1);
ShuffleVectorInst *S = nullptr;
if ((S = dyn_cast<ShuffleVectorInst>(L)))
return std::make_pair(R, S);
S = dyn_cast<ShuffleVectorInst>(R);
return std::make_pair(L, S);
}
static bool matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
unsigned &Opcode, Type *&Ty) {
if (!EnableReduxCost)
return false;
// Need to extract the first element.
ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
unsigned Idx = ~0u;
if (CI)
Idx = CI->getZExtValue();
if (Idx != 0)
return false;
BinaryOperator *RdxStart = dyn_cast<BinaryOperator>(ReduxRoot->getOperand(0));
if (!RdxStart)
return false;
unsigned RdxOpcode = RdxStart->getOpcode();
Type *VecTy = ReduxRoot->getOperand(0)->getType();
unsigned NumVecElems = VecTy->getVectorNumElements();
if (!isPowerOf2_32(NumVecElems))
return false;
  // We look for a sequence of shuffles and adds like the following, matching
  // one (fadd, shufflevector) pair at a time.
//
// %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
// %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
// %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
// %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
// %r = extractelement <4 x float> %bin.rdx8, i32 0
unsigned MaskStart = 1;
Value *RdxOp = RdxStart;
SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
unsigned NumVecElemsRemain = NumVecElems;
while (NumVecElemsRemain - 1) {
// Check for the right reduction operation.
BinaryOperator *BinOp;
if (!(BinOp = dyn_cast<BinaryOperator>(RdxOp)))
return false;
if (BinOp->getOpcode() != RdxOpcode)
return false;
Value *NextRdxOp;
ShuffleVectorInst *Shuffle;
std::tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);
    // Check that the current reduction operation and the shuffle use the same
    // value.
if (Shuffle == nullptr)
return false;
if (Shuffle->getOperand(0) != NextRdxOp)
return false;
    // Check that the shuffle mask matches.
for (unsigned j = 0; j != MaskStart; ++j)
ShuffleMask[j] = MaskStart + j;
// Fill the rest of the mask with -1 for undef.
std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
if (ShuffleMask != Mask)
return false;
RdxOp = NextRdxOp;
NumVecElemsRemain /= 2;
MaskStart *= 2;
}
Opcode = RdxOpcode;
Ty = VecTy;
return true;
}
unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
if (!TTI)
return -1;
switch (I->getOpcode()) {
case Instruction::GetElementPtr:{
Type *ValTy = I->getOperand(0)->getType()->getPointerElementType();
return TTI->getAddressComputationCost(ValTy);
}
case Instruction::Ret:
case Instruction::PHI:
case Instruction::Br: {
return TTI->getCFInstrCost(I->getOpcode());
}
case Instruction::Add:
case Instruction::FAdd:
case Instruction::Sub:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
case Instruction::UDiv:
case Instruction::SDiv:
case Instruction::FDiv:
case Instruction::URem:
case Instruction::SRem:
case Instruction::FRem:
case Instruction::Shl:
case Instruction::LShr:
case Instruction::AShr:
case Instruction::And:
case Instruction::Or:
case Instruction::Xor: {
TargetTransformInfo::OperandValueKind Op1VK =
getOperandInfo(I->getOperand(0));
TargetTransformInfo::OperandValueKind Op2VK =
getOperandInfo(I->getOperand(1));
return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK,
Op2VK);
}
case Instruction::Select: {
const SelectInst *SI = cast<SelectInst>(I);
Type *CondTy = SI->getCondition()->getType();
return TTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy);
}
case Instruction::ICmp:
case Instruction::FCmp: {
Type *ValTy = I->getOperand(0)->getType();
return TTI->getCmpSelInstrCost(I->getOpcode(), ValTy);
}
case Instruction::Store: {
const StoreInst *SI = cast<StoreInst>(I);
Type *ValTy = SI->getValueOperand()->getType();
return TTI->getMemoryOpCost(I->getOpcode(), ValTy,
SI->getAlignment(),
SI->getPointerAddressSpace());
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(I);
return TTI->getMemoryOpCost(I->getOpcode(), I->getType(),
LI->getAlignment(),
LI->getPointerAddressSpace());
}
case Instruction::ZExt:
case Instruction::SExt:
case Instruction::FPToUI:
case Instruction::FPToSI:
case Instruction::FPExt:
case Instruction::PtrToInt:
case Instruction::IntToPtr:
case Instruction::SIToFP:
case Instruction::UIToFP:
case Instruction::Trunc:
case Instruction::FPTrunc:
case Instruction::BitCast:
case Instruction::AddrSpaceCast: {
Type *SrcTy = I->getOperand(0)->getType();
return TTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy);
}
case Instruction::ExtractElement: {
const ExtractElementInst * EEI = cast<ExtractElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
    // Try to match a reduction sequence (series of shufflevector and vector
    // adds followed by an extractelement).
unsigned ReduxOpCode;
Type *ReduxType;
if (matchVectorSplittingReduction(EEI, ReduxOpCode, ReduxType))
return TTI->getReductionCost(ReduxOpCode, ReduxType, false);
else if (matchPairwiseReduction(EEI, ReduxOpCode, ReduxType))
return TTI->getReductionCost(ReduxOpCode, ReduxType, true);
return TTI->getVectorInstrCost(I->getOpcode(),
EEI->getOperand(0)->getType(), Idx);
}
case Instruction::InsertElement: {
const InsertElementInst * IE = cast<InsertElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
return TTI->getVectorInstrCost(I->getOpcode(),
IE->getType(), Idx);
}
case Instruction::ShuffleVector: {
const ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
Type *VecTypOp0 = Shuffle->getOperand(0)->getType();
unsigned NumVecElems = VecTypOp0->getVectorNumElements();
SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
if (NumVecElems == Mask.size()) {
if (isReverseVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0,
0, nullptr);
if (isAlternateVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Alternate,
VecTypOp0, 0, nullptr);
}
return -1;
}
case Instruction::Call:
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
SmallVector<Type*, 4> Tys;
for (unsigned J = 0, JE = II->getNumArgOperands(); J != JE; ++J)
Tys.push_back(II->getArgOperand(J)->getType());
return TTI->getIntrinsicInstrCost(II->getIntrinsicID(), II->getType(),
Tys);
}
return -1;
default:
// We don't have any information on this instruction.
return -1;
}
}
void CostModelAnalysis::print(raw_ostream &OS, const Module*) const {
if (!F)
return;
for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
Instruction *Inst = it;
unsigned Cost = getInstructionCost(Inst);
if (Cost != (unsigned)-1)
OS << "Cost Model: Found an estimated cost of " << Cost;
else
OS << "Cost Model: Unknown cost";
OS << " for instruction: "<< *Inst << "\n";
}
}
}
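// Typical use is via opt (illustrative invocation, flags as registered above):
//
//   opt -cost-model -analyze input.ll
//
// which prints one "Cost Model: ..." line per instruction, using the target's
// TargetTransformInfo when one is available.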
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/CFG.cpp | //===-- CFG.cpp - BasicBlock analysis --------------------------------------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs analyses on basic blocks, and instructions
// contained within basic blocks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/CFG.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;
/// FindFunctionBackedges - Analyze the specified function to find all of the
/// loop backedges in the function and return them. This is a relatively cheap
/// (compared to computing dominators and loop info) analysis.
///
/// The output is added to Result, as pairs of <from,to> edge info.
void llvm::FindFunctionBackedges(const Function &F,
SmallVectorImpl<std::pair<const BasicBlock*,const BasicBlock*> > &Result) {
const BasicBlock *BB = &F.getEntryBlock();
if (succ_empty(BB))
return;
SmallPtrSet<const BasicBlock*, 8> Visited;
SmallVector<std::pair<const BasicBlock*, succ_const_iterator>, 8> VisitStack;
SmallPtrSet<const BasicBlock*, 8> InStack;
Visited.insert(BB);
VisitStack.push_back(std::make_pair(BB, succ_begin(BB)));
InStack.insert(BB);
do {
std::pair<const BasicBlock*, succ_const_iterator> &Top = VisitStack.back();
const BasicBlock *ParentBB = Top.first;
succ_const_iterator &I = Top.second;
bool FoundNew = false;
while (I != succ_end(ParentBB)) {
BB = *I++;
if (Visited.insert(BB).second) {
FoundNew = true;
break;
}
// Successor is in VisitStack, it's a back edge.
if (InStack.count(BB))
Result.push_back(std::make_pair(ParentBB, BB));
}
if (FoundNew) {
      // Go down one level if there is an unvisited successor.
InStack.insert(BB);
VisitStack.push_back(std::make_pair(BB, succ_begin(BB)));
} else {
// Go up one level.
InStack.erase(VisitStack.pop_back_val().first);
}
} while (!VisitStack.empty());
}
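// Minimal usage sketch (hypothetical caller):
//
//   SmallVector<std::pair<const BasicBlock *, const BasicBlock *>, 8> Edges;
//   FindFunctionBackedges(F, Edges);
//   for (const auto &Edge : Edges)
//     ; // Edge.first -> Edge.second is a loop backedge.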
/// GetSuccessorNumber - Search for the specified successor of basic block BB
/// and return its position in the terminator instruction's list of
/// successors. It is an error to call this with a block that is not a
/// successor.
unsigned llvm::GetSuccessorNumber(BasicBlock *BB, BasicBlock *Succ) {
TerminatorInst *Term = BB->getTerminator();
#ifndef NDEBUG
unsigned e = Term->getNumSuccessors();
#endif
for (unsigned i = 0; ; ++i) {
assert(i != e && "Didn't find edge?");
if (Term->getSuccessor(i) == Succ)
return i;
}
}
/// isCriticalEdge - Return true if the specified edge is a critical edge.
/// Critical edges are edges from a block with multiple successors to a block
/// with multiple predecessors.
bool llvm::isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
bool AllowIdenticalEdges) {
assert(SuccNum < TI->getNumSuccessors() && "Illegal edge specification!");
if (TI->getNumSuccessors() == 1) return false;
const BasicBlock *Dest = TI->getSuccessor(SuccNum);
const_pred_iterator I = pred_begin(Dest), E = pred_end(Dest);
// If there is more than one predecessor, this is a critical edge...
assert(I != E && "No preds, but we have an edge to the block?");
const BasicBlock *FirstPred = *I;
++I; // Skip one edge due to the incoming arc from TI.
if (!AllowIdenticalEdges)
return I != E;
// If AllowIdenticalEdges is true, then we allow this edge to be considered
// non-critical iff all preds come from TI's block.
for (; I != E; ++I)
if (*I != FirstPred)
return true;
return false;
}
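// Example (hypothetical CFG): given
//
//   A: br i1 %c, label %B, label %C
//   D: br label %C
//
// the edge A -> C is critical (A has two successors and C has two
// predecessors), while A -> B and D -> C are not.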
// LoopInfo contains a mapping from basic block to the innermost loop. Find
// the outermost loop in the loop nest that contains BB.
static const Loop *getOutermostLoop(const LoopInfo *LI, const BasicBlock *BB) {
const Loop *L = LI->getLoopFor(BB);
if (L) {
while (const Loop *Parent = L->getParentLoop())
L = Parent;
}
return L;
}
// True if there is a loop which contains both BB1 and BB2.
static bool loopContainsBoth(const LoopInfo *LI,
const BasicBlock *BB1, const BasicBlock *BB2) {
const Loop *L1 = getOutermostLoop(LI, BB1);
const Loop *L2 = getOutermostLoop(LI, BB2);
return L1 != nullptr && L1 == L2;
}
bool llvm::isPotentiallyReachableFromMany(
SmallVectorImpl<BasicBlock *> &Worklist, BasicBlock *StopBB,
const DominatorTree *DT, const LoopInfo *LI) {
// When the stop block is unreachable, it's dominated from everywhere,
// regardless of whether there's a path between the two blocks.
if (DT && !DT->isReachableFromEntry(StopBB))
DT = nullptr;
// Limit the number of blocks we visit. The goal is to avoid run-away compile
// times on large CFGs without hampering sensible code. Arbitrarily chosen.
unsigned Limit = 32;
SmallSet<const BasicBlock*, 64> Visited;
do {
BasicBlock *BB = Worklist.pop_back_val();
if (!Visited.insert(BB).second)
continue;
if (BB == StopBB)
return true;
if (DT && DT->dominates(BB, StopBB))
return true;
if (LI && loopContainsBoth(LI, BB, StopBB))
return true;
if (!--Limit) {
// We haven't been able to prove it one way or the other. Conservatively
// answer true -- that there is potentially a path.
return true;
}
if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : nullptr) {
// All blocks in a single loop are reachable from all other blocks. From
// any of these blocks, we can skip directly to the exits of the loop,
// ignoring any other blocks inside the loop body.
Outer->getExitBlocks(Worklist);
} else {
Worklist.append(succ_begin(BB), succ_end(BB));
}
} while (!Worklist.empty());
  // We have exhausted all possible paths and are certain that 'To' cannot be
  // reached from 'From'.
return false;
}
bool llvm::isPotentiallyReachable(const BasicBlock *A, const BasicBlock *B,
const DominatorTree *DT, const LoopInfo *LI) {
assert(A->getParent() == B->getParent() &&
"This analysis is function-local!");
SmallVector<BasicBlock*, 32> Worklist;
Worklist.push_back(const_cast<BasicBlock*>(A));
return isPotentiallyReachableFromMany(Worklist, const_cast<BasicBlock *>(B),
DT, LI);
}
bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
const DominatorTree *DT, const LoopInfo *LI) {
assert(A->getParent()->getParent() == B->getParent()->getParent() &&
"This analysis is function-local!");
SmallVector<BasicBlock*, 32> Worklist;
if (A->getParent() == B->getParent()) {
// The same block case is special because it's the only time we're looking
// within a single block to see which instruction comes first. Once we
// start looking at multiple blocks, the first instruction of the block is
// reachable, so we only need to determine reachability between whole
// blocks.
BasicBlock *BB = const_cast<BasicBlock *>(A->getParent());
// If the block is in a loop then we can reach any instruction in the block
// from any other instruction in the block by going around a backedge.
if (LI && LI->getLoopFor(BB) != nullptr)
return true;
// Linear scan, start at 'A', see whether we hit 'B' or the end first.
for (BasicBlock::const_iterator I = A, E = BB->end(); I != E; ++I) {
if (&*I == B)
return true;
}
// Can't be in a loop if it's the entry block -- the entry block may not
// have predecessors.
if (BB == &BB->getParent()->getEntryBlock())
return false;
// Otherwise, continue doing the normal per-BB CFG walk.
Worklist.append(succ_begin(BB), succ_end(BB));
if (Worklist.empty()) {
// We've proven that there's no path!
return false;
}
} else {
Worklist.push_back(const_cast<BasicBlock*>(A->getParent()));
}
if (A->getParent() == &A->getParent()->getParent()->getEntryBlock())
return true;
if (B->getParent() == &A->getParent()->getParent()->getEntryBlock())
return false;
return isPotentiallyReachableFromMany(
Worklist, const_cast<BasicBlock *>(B->getParent()), DT, LI);
}
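// Minimal usage sketch (hypothetical caller; DT and LI are optional and only
// sharpen or speed up the answer):
//
//   if (isPotentiallyReachable(DefInst, UseInst, &DT, &LI))
//     ; // Conservatively assume a path may exist.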
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/DxilConstantFoldingExt.cpp | //===-- DxilConstantFoldingExt.cpp - Hooks for extensions ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// Copyright (C) Microsoft Corporation. All rights reserved.
//
//===----------------------------------------------------------------------===//
//
// These are placeholder hooks to support constant folding of extensions.
// They are defined in a separate file to make it easy to merge changes or link
// in your own version. There should be no upstream changes to these
// definitions.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DxilConstantFolding.h"
#include "llvm/IR/Constant.h"
using namespace llvm;
Constant *hlsl::ConstantFoldScalarCallExt(StringRef Name, Type *Ty,
ArrayRef<Constant *> RawOperands) {
return nullptr;
}
bool hlsl::CanConstantFoldCallToExt(const Function *F) { return false; }
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/CodeMetrics.cpp | //===- CodeMetrics.cpp - Code cost measurements ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements code cost measurement utilities.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "code-metrics"
using namespace llvm;
static void completeEphemeralValues(SmallVector<const Value *, 16> &WorkSet,
SmallPtrSetImpl<const Value*> &EphValues) {
SmallPtrSet<const Value *, 32> Visited;
// Make sure that all of the items in WorkSet are in our EphValues set.
EphValues.insert(WorkSet.begin(), WorkSet.end());
// Note: We don't speculate PHIs here, so we'll miss instruction chains kept
// alive only by ephemeral values.
while (!WorkSet.empty()) {
const Value *V = WorkSet.front();
WorkSet.erase(WorkSet.begin());
if (!Visited.insert(V).second)
continue;
// If all uses of this value are ephemeral, then so is this value.
bool FoundNEUse = false;
for (const User *I : V->users())
if (!EphValues.count(I)) {
FoundNEUse = true;
break;
}
if (FoundNEUse)
continue;
EphValues.insert(V);
DEBUG(dbgs() << "Ephemeral Value: " << *V << "\n");
if (const User *U = dyn_cast<User>(V))
for (const Value *J : U->operands()) {
if (isSafeToSpeculativelyExecute(J))
WorkSet.push_back(J);
}
}
}
// Find all ephemeral values.
void CodeMetrics::collectEphemeralValues(
const Loop *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues) {
SmallVector<const Value *, 16> WorkSet;
for (auto &AssumeVH : AC->assumptions()) {
if (!AssumeVH)
continue;
Instruction *I = cast<Instruction>(AssumeVH);
    // Filter out call sites outside of the loop so we don't do a function's
    // worth of work for each of its loops (and, in the common case, ephemeral
    // values in the loop are likely due to @llvm.assume calls in the loop).
if (!L->contains(I->getParent()))
continue;
WorkSet.push_back(I);
}
completeEphemeralValues(WorkSet, EphValues);
}
void CodeMetrics::collectEphemeralValues(
const Function *F, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues) {
SmallVector<const Value *, 16> WorkSet;
for (auto &AssumeVH : AC->assumptions()) {
if (!AssumeVH)
continue;
Instruction *I = cast<Instruction>(AssumeVH);
assert(I->getParent()->getParent() == F &&
"Found assumption for the wrong function!");
WorkSet.push_back(I);
}
completeEphemeralValues(WorkSet, EphValues);
}
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
const TargetTransformInfo &TTI,
SmallPtrSetImpl<const Value*> &EphValues) {
++NumBlocks;
unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
II != E; ++II) {
// Skip ephemeral values.
if (EphValues.count(II))
continue;
// Special handling for calls.
if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
ImmutableCallSite CS(cast<Instruction>(II));
if (const Function *F = CS.getCalledFunction()) {
// If a function is both internal and has a single use, then it is
// extremely likely to get inlined in the future (it was probably
// exposed by an interleaved devirtualization pass).
if (!CS.isNoInline() && F->hasInternalLinkage() && F->hasOneUse())
++NumInlineCandidates;
// If this call is to function itself, then the function is recursive.
// Inlining it into other functions is a bad idea, because this is
// basically just a form of loop peeling, and our metrics aren't useful
// for that case.
if (F == BB->getParent())
isRecursive = true;
if (TTI.isLoweredToCall(F))
++NumCalls;
} else {
// We don't want inline asm to count as a call - that would prevent loop
// unrolling. The argument setup cost is still real, though.
if (!isa<InlineAsm>(CS.getCalledValue()))
++NumCalls;
}
}
if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
if (!AI->isStaticAlloca())
this->usesDynamicAlloca = true;
}
if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
++NumVectorInsts;
if (const CallInst *CI = dyn_cast<CallInst>(II))
if (CI->cannotDuplicate())
notDuplicatable = true;
if (const InvokeInst *InvI = dyn_cast<InvokeInst>(II))
if (InvI->cannotDuplicate())
notDuplicatable = true;
NumInsts += TTI.getUserCost(&*II);
}
if (isa<ReturnInst>(BB->getTerminator()))
++NumRets;
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers,
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is extremely undefined behavior.
// FIXME: This logic isn't really right; we can safely inline functions
// with indirectbr's as long as no other function or global references the
// blockaddress of a block within the current function. And as a QOI issue,
// if someone is using a blockaddress without an indirectbr, and that
// reference somehow ends up in another function or global, we probably
// don't want to inline this function.
notDuplicatable |= isa<IndirectBrInst>(BB->getTerminator());
// Remember NumInsts for this BB.
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}
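// Minimal usage sketch (hypothetical caller with a Function &F, an
// AssumptionCache &AC, and a TargetTransformInfo &TTI in scope):
//
//   CodeMetrics Metrics;
//   SmallPtrSet<const Value *, 32> EphValues;
//   CodeMetrics::collectEphemeralValues(&F, &AC, EphValues);
//   for (const BasicBlock &BB : F)
//     Metrics.analyzeBasicBlock(&BB, TTI, EphValues);
//   // Metrics.NumInsts, Metrics.notDuplicatable, etc. are now populated.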
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/NoAliasAnalysis.cpp | //===- NoAliasAnalysis.cpp - Minimal Alias Analysis Impl ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the default implementation of the Alias Analysis interface
// that simply returns "I don't know" for all queries.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;
namespace {
/// NoAA - This class implements the -no-aa pass, which always returns "I
/// don't know" for alias queries. NoAA is unlike other alias analysis
/// implementations, in that it does not chain to a previous analysis. As
/// such it doesn't follow many of the rules that other alias analyses must.
///
struct NoAA : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
NoAA() : ImmutablePass(ID) {
initializeNoAAPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {}
bool doInitialization(Module &M) override {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
DL = &M.getDataLayout();
return true;
}
AliasResult alias(const MemoryLocation &LocA,
const MemoryLocation &LocB) override {
return MayAlias;
}
ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
return UnknownModRefBehavior;
}
ModRefBehavior getModRefBehavior(const Function *F) override {
return UnknownModRefBehavior;
}
bool pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) override {
return false;
}
ModRefResult getArgModRefInfo(ImmutableCallSite CS,
unsigned ArgIdx) override {
return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) override {
return ModRef;
}
ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2) override {
return ModRef;
}
void deleteValue(Value *V) override {}
void addEscapingUse(Use &U) override {}
/// getAdjustedAnalysisPointer - This method is used when a pass implements
/// an analysis interface through multiple inheritance. If needed, it
/// should override this to adjust the this pointer as needed for the
/// specified pass info.
void *getAdjustedAnalysisPointer(const void *ID) override {
if (ID == &AliasAnalysis::ID)
return (AliasAnalysis*)this;
return this;
}
};
} // End of anonymous namespace
// Register this pass...
char NoAA::ID = 0;
INITIALIZE_AG_PASS(NoAA, AliasAnalysis, "no-aa",
"No Alias Analysis (always returns 'may' alias)",
true, true, true)
ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }
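// Illustrative invocation: running a pipeline with the flag registered above,
// e.g. "opt -no-aa -licm input.ll", forces every alias query in that run to
// receive the most conservative possible answer.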
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/AliasSetTracker.cpp | //===- AliasSetTracker.cpp - Alias Sets Tracker implementation-------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AliasSetTracker and AliasSet classes.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
/// mergeSetIn - Merge the specified alias set into this alias set.
///
void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
assert(!AS.Forward && "Alias set is already forwarding!");
assert(!Forward && "This set is a forwarding set!!");
// Update the alias and access types of this set...
Access |= AS.Access;
Alias |= AS.Alias;
Volatile |= AS.Volatile;
if (Alias == SetMustAlias) {
// Check that these two merged sets really are must aliases. Since both
// used to be must-alias sets, we can just check any pointer from each set
// for aliasing.
AliasAnalysis &AA = AST.getAliasAnalysis();
PointerRec *L = getSomePointer();
PointerRec *R = AS.getSomePointer();
// If the pointers are not a must-alias pair, this set becomes a may alias.
if (AA.alias(MemoryLocation(L->getValue(), L->getSize(), L->getAAInfo()),
MemoryLocation(R->getValue(), R->getSize(), R->getAAInfo())) !=
MustAlias)
Alias = SetMayAlias;
}
bool ASHadUnknownInsts = !AS.UnknownInsts.empty();
if (UnknownInsts.empty()) { // Merge call sites...
if (ASHadUnknownInsts) {
std::swap(UnknownInsts, AS.UnknownInsts);
addRef();
}
} else if (ASHadUnknownInsts) {
    UnknownInsts.insert(UnknownInsts.end(), AS.UnknownInsts.begin(),
                        AS.UnknownInsts.end());
AS.UnknownInsts.clear();
}
AS.Forward = this; // Forward across AS now...
addRef(); // AS is now pointing to us...
// Merge the list of constituent pointers...
if (AS.PtrList) {
*PtrListEnd = AS.PtrList;
AS.PtrList->setPrevInList(PtrListEnd);
PtrListEnd = AS.PtrListEnd;
AS.PtrList = nullptr;
AS.PtrListEnd = &AS.PtrList;
assert(*AS.PtrListEnd == nullptr && "End of list is not null?");
}
if (ASHadUnknownInsts)
AS.dropRef(AST);
}
void AliasSetTracker::removeAliasSet(AliasSet *AS) {
if (AliasSet *Fwd = AS->Forward) {
Fwd->dropRef(*this);
AS->Forward = nullptr;
}
AliasSets.erase(AS);
}
void AliasSet::removeFromTracker(AliasSetTracker &AST) {
assert(RefCount == 0 && "Cannot remove non-dead alias set from tracker!");
AST.removeAliasSet(this);
}
void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
uint64_t Size, const AAMDNodes &AAInfo,
bool KnownMustAlias) {
assert(!Entry.hasAliasSet() && "Entry already in set!");
// Check to see if we have to downgrade to _may_ alias.
if (isMustAlias() && !KnownMustAlias)
if (PointerRec *P = getSomePointer()) {
AliasAnalysis &AA = AST.getAliasAnalysis();
AliasResult Result =
AA.alias(MemoryLocation(P->getValue(), P->getSize(), P->getAAInfo()),
MemoryLocation(Entry.getValue(), Size, AAInfo));
if (Result != MustAlias)
Alias = SetMayAlias;
else // First entry of must alias must have maximum size!
P->updateSizeAndAAInfo(Size, AAInfo);
assert(Result != NoAlias && "Cannot be part of must set!");
}
Entry.setAliasSet(this);
Entry.updateSizeAndAAInfo(Size, AAInfo);
// Add it to the end of the list...
assert(*PtrListEnd == nullptr && "End of list is not null?");
*PtrListEnd = &Entry;
PtrListEnd = Entry.setPrevInList(PtrListEnd);
assert(*PtrListEnd == nullptr && "End of list is not null?");
addRef(); // Entry points to alias set.
}
void AliasSet::addUnknownInst(Instruction *I, AliasAnalysis &AA) {
if (UnknownInsts.empty())
addRef();
UnknownInsts.emplace_back(I);
if (!I->mayWriteToMemory()) {
Alias = SetMayAlias;
Access |= RefAccess;
return;
}
// FIXME: This should use mod/ref information to make this not suck so bad
Alias = SetMayAlias;
Access = ModRefAccess;
}
/// aliasesPointer - Return true if the specified pointer "may" (or must)
/// alias one of the members in the set.
///
bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
const AAMDNodes &AAInfo,
AliasAnalysis &AA) const {
if (Alias == SetMustAlias) {
assert(UnknownInsts.empty() && "Illegal must alias set!");
// If this is a set of MustAliases, only check to see if the pointer aliases
// SOME value in the set.
PointerRec *SomePtr = getSomePointer();
assert(SomePtr && "Empty must-alias set??");
return AA.alias(MemoryLocation(SomePtr->getValue(), SomePtr->getSize(),
SomePtr->getAAInfo()),
MemoryLocation(Ptr, Size, AAInfo));
}
// If this is a may-alias set, we have to check all of the pointers in the set
// to be sure it doesn't alias the set...
for (iterator I = begin(), E = end(); I != E; ++I)
if (AA.alias(MemoryLocation(Ptr, Size, AAInfo),
MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())))
return true;
// Check the unknown instructions...
if (!UnknownInsts.empty()) {
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i)
if (AA.getModRefInfo(UnknownInsts[i],
MemoryLocation(Ptr, Size, AAInfo)) !=
AliasAnalysis::NoModRef)
return true;
}
return false;
}
bool AliasSet::aliasesUnknownInst(const Instruction *Inst,
AliasAnalysis &AA) const {
if (!Inst->mayReadOrWriteMemory())
return false;
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) {
ImmutableCallSite C1(getUnknownInst(i)), C2(Inst);
if (!C1 || !C2 ||
AA.getModRefInfo(C1, C2) != AliasAnalysis::NoModRef ||
AA.getModRefInfo(C2, C1) != AliasAnalysis::NoModRef)
return true;
}
for (iterator I = begin(), E = end(); I != E; ++I)
if (AA.getModRefInfo(
Inst, MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo())) !=
AliasAnalysis::NoModRef)
return true;
return false;
}
void AliasSetTracker::clear() {
// Delete all the PointerRec entries.
for (PointerMapType::iterator I = PointerMap.begin(), E = PointerMap.end();
I != E; ++I)
I->second->eraseFromList();
PointerMap.clear();
// The alias sets should all be clear now.
AliasSets.clear();
}
/// findAliasSetForPointer - Given a pointer, find the one alias set to put the
/// instruction referring to the pointer into. If there are multiple alias sets
/// that may alias the pointer, merge them together and return the unified set.
///
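/// Merged-away sets are left behind as forwarding stubs (see
/// AliasSet::mergeSetIn) and are reclaimed once their reference counts
/// reach zero.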
AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
uint64_t Size,
const AAMDNodes &AAInfo) {
AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, AAInfo, AA)) continue;
if (!FoundSet) { // If this is the first alias set ptr can go into.
FoundSet = Cur; // Remember it.
} else { // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*Cur, *this); // Merge in contents.
}
}
return FoundSet;
}
/// containsPointer - Return true if the specified location is represented by
/// this alias set, false otherwise. This does not modify the AST object or
/// alias sets.
bool AliasSetTracker::containsPointer(const Value *Ptr, uint64_t Size,
const AAMDNodes &AAInfo) const {
for (const_iterator I = begin(), E = end(); I != E; ++I)
if (!I->Forward && I->aliasesPointer(Ptr, Size, AAInfo, AA))
return true;
return false;
}
bool AliasSetTracker::containsUnknown(const Instruction *Inst) const {
for (const_iterator I = begin(), E = end(); I != E; ++I)
if (!I->Forward && I->aliasesUnknownInst(Inst, AA))
return true;
return false;
}
AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (Cur->Forward || !Cur->aliasesUnknownInst(Inst, AA))
continue;
if (!FoundSet) // If this is the first alias set ptr can go into.
FoundSet = Cur; // Remember it.
else if (!Cur->Forward) // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*Cur, *this); // Merge in contents.
}
return FoundSet;
}
/// getAliasSetForPointer - Return the alias set that the specified pointer
/// lives in.
AliasSet &AliasSetTracker::getAliasSetForPointer(Value *Pointer, uint64_t Size,
const AAMDNodes &AAInfo,
bool *New) {
AliasSet::PointerRec &Entry = getEntryFor(Pointer);
// Check to see if the pointer is already known.
if (Entry.hasAliasSet()) {
Entry.updateSizeAndAAInfo(Size, AAInfo);
// Return the set!
return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
}
if (AliasSet *AS = findAliasSetForPointer(Pointer, Size, AAInfo)) {
// Add it to the alias set it aliases.
AS->addPointer(*this, Entry, Size, AAInfo);
return *AS;
}
if (New) *New = true;
// Otherwise create a new alias set to hold the loaded pointer.
AliasSets.push_back(new AliasSet());
AliasSets.back().addPointer(*this, Entry, Size, AAInfo);
return AliasSets.back();
}
bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) {
bool NewPtr;
addPointer(Ptr, Size, AAInfo, AliasSet::NoAccess, NewPtr);
return NewPtr;
}
bool AliasSetTracker::add(LoadInst *LI) {
if (LI->getOrdering() > Monotonic) return addUnknown(LI);
AAMDNodes AAInfo;
LI->getAAMetadata(AAInfo);
AliasSet::AccessLattice Access = AliasSet::RefAccess;
bool NewPtr;
AliasSet &AS = addPointer(LI->getOperand(0),
AA.getTypeStoreSize(LI->getType()),
AAInfo, Access, NewPtr);
if (LI->isVolatile()) AS.setVolatile();
return NewPtr;
}
bool AliasSetTracker::add(StoreInst *SI) {
if (SI->getOrdering() > Monotonic) return addUnknown(SI);
AAMDNodes AAInfo;
SI->getAAMetadata(AAInfo);
AliasSet::AccessLattice Access = AliasSet::ModAccess;
bool NewPtr;
Value *Val = SI->getOperand(0);
AliasSet &AS = addPointer(SI->getOperand(1),
AA.getTypeStoreSize(Val->getType()),
AAInfo, Access, NewPtr);
if (SI->isVolatile()) AS.setVolatile();
return NewPtr;
}
bool AliasSetTracker::add(VAArgInst *VAAI) {
AAMDNodes AAInfo;
VAAI->getAAMetadata(AAInfo);
bool NewPtr;
addPointer(VAAI->getOperand(0), MemoryLocation::UnknownSize, AAInfo,
AliasSet::ModRefAccess, NewPtr);
return NewPtr;
}
bool AliasSetTracker::addUnknown(Instruction *Inst) {
if (isa<DbgInfoIntrinsic>(Inst))
return true; // Ignore DbgInfo Intrinsics.
if (!Inst->mayReadOrWriteMemory())
return true; // doesn't alias anything
AliasSet *AS = findAliasSetForUnknownInst(Inst);
if (AS) {
AS->addUnknownInst(Inst, AA);
return false;
}
AliasSets.push_back(new AliasSet());
AS = &AliasSets.back();
AS->addUnknownInst(Inst, AA);
return true;
}
bool AliasSetTracker::add(Instruction *I) {
// Dispatch to one of the other add methods.
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return add(LI);
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return add(SI);
if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
return add(VAAI);
return addUnknown(I);
}
void AliasSetTracker::add(BasicBlock &BB) {
for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
add(I);
}
void AliasSetTracker::add(const AliasSetTracker &AST) {
assert(&AA == &AST.AA &&
"Merging AliasSetTracker objects with different Alias Analyses!");
// Loop over all of the alias sets in AST, adding the pointers contained
// therein into the current alias sets. This can cause alias sets to be
// merged together in the current AST.
for (const_iterator I = AST.begin(), E = AST.end(); I != E; ++I) {
if (I->Forward) continue; // Ignore forwarding alias sets
AliasSet &AS = const_cast<AliasSet&>(*I);
// If there are any unknown instructions in the alias set, add them to this AST.
for (unsigned i = 0, e = AS.UnknownInsts.size(); i != e; ++i)
add(AS.UnknownInsts[i]);
// Loop over all of the pointers in this alias set.
bool X;
for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
AliasSet &NewAS = addPointer(ASI.getPointer(), ASI.getSize(),
ASI.getAAInfo(),
(AliasSet::AccessLattice)AS.Access, X);
if (AS.isVolatile()) NewAS.setVolatile();
}
}
}
/// remove - Remove the specified (potentially non-empty) alias set from the
/// tracker.
void AliasSetTracker::remove(AliasSet &AS) {
// Drop all unknown instructions.
if (!AS.UnknownInsts.empty())
AS.dropRef(*this);
AS.UnknownInsts.clear();
// Clear the alias set.
unsigned NumRefs = 0;
while (!AS.empty()) {
AliasSet::PointerRec *P = AS.PtrList;
Value *ValToRemove = P->getValue();
// Unlink and delete entry from the list of values.
P->eraseFromList();
// Remember how many references need to be dropped.
++NumRefs;
// Finally, remove the entry.
PointerMap.erase(ValToRemove);
}
// Stop using the alias set, removing it.
AS.RefCount -= NumRefs;
if (AS.RefCount == 0)
AS.removeFromTracker(*this);
}
bool
AliasSetTracker::remove(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) {
AliasSet *AS = findAliasSetForPointer(Ptr, Size, AAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(LoadInst *LI) {
uint64_t Size = AA.getTypeStoreSize(LI->getType());
AAMDNodes AAInfo;
LI->getAAMetadata(AAInfo);
AliasSet *AS = findAliasSetForPointer(LI->getOperand(0), Size, AAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(StoreInst *SI) {
uint64_t Size = AA.getTypeStoreSize(SI->getOperand(0)->getType());
AAMDNodes AAInfo;
SI->getAAMetadata(AAInfo);
AliasSet *AS = findAliasSetForPointer(SI->getOperand(1), Size, AAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(VAArgInst *VAAI) {
AAMDNodes AAInfo;
VAAI->getAAMetadata(AAInfo);
AliasSet *AS = findAliasSetForPointer(VAAI->getOperand(0),
MemoryLocation::UnknownSize, AAInfo);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::removeUnknown(Instruction *I) {
if (!I->mayReadOrWriteMemory())
return false; // doesn't alias anything
AliasSet *AS = findAliasSetForUnknownInst(I);
if (!AS) return false;
remove(*AS);
return true;
}
bool AliasSetTracker::remove(Instruction *I) {
// Dispatch to one of the other remove methods...
if (LoadInst *LI = dyn_cast<LoadInst>(I))
return remove(LI);
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return remove(SI);
if (VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
return remove(VAAI);
return removeUnknown(I);
}
// deleteValue method - This method is used to remove a pointer value from the
// AliasSetTracker entirely. It should be used when an instruction is deleted
// from the program to update the AST. If you don't use this, you would have
// dangling pointers to deleted instructions.
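//
// For illustration (hypothetical client code):
//   Instruction *Dead = ...;
//   AST.deleteValue(Dead);    // drop the tracker's bookkeeping first
//   Dead->eraseFromParent();  // then actually delete the instruction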
//
void AliasSetTracker::deleteValue(Value *PtrVal) {
// Notify the alias analysis implementation that this value is gone.
AA.deleteValue(PtrVal);
// If this instruction may access memory, remove it from any alias set that
// tracks it as an unknown instruction (if present).
if (Instruction *Inst = dyn_cast<Instruction>(PtrVal)) {
if (Inst->mayReadOrWriteMemory()) {
// Scan all the alias sets to see if this call site is contained.
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (!Cur->Forward)
Cur->removeUnknownInst(*this, Inst);
}
}
}
// First, look up the PointerRec for this pointer.
PointerMapType::iterator I = PointerMap.find_as(PtrVal);
if (I == PointerMap.end()) return; // Noop
// If we found one, remove the pointer from the alias set it is in.
AliasSet::PointerRec *PtrValEnt = I->second;
AliasSet *AS = PtrValEnt->getAliasSet(*this);
// Unlink and delete from the list of values.
PtrValEnt->eraseFromList();
// Stop using the alias set.
AS->dropRef(*this);
PointerMap.erase(I);
}
// copyValue - This method should be used whenever a preexisting value in the
// program is copied or cloned, introducing a new value. Note that it is ok for
// clients that use this method to introduce the same value multiple times: if
// the tracker already knows about a value, it will ignore the request.
//
void AliasSetTracker::copyValue(Value *From, Value *To) {
// First, look up the PointerRec for this pointer.
PointerMapType::iterator I = PointerMap.find_as(From);
if (I == PointerMap.end())
return; // Noop
assert(I->second->hasAliasSet() && "Dead entry?");
AliasSet::PointerRec &Entry = getEntryFor(To);
if (Entry.hasAliasSet()) return; // Already in the tracker!
// Add it to the alias set it aliases...
I = PointerMap.find_as(From);
AliasSet *AS = I->second->getAliasSet(*this);
AS->addPointer(*this, Entry, I->second->getSize(),
I->second->getAAInfo(),
true);
}
//===----------------------------------------------------------------------===//
// AliasSet/AliasSetTracker Printing Support
//===----------------------------------------------------------------------===//
void AliasSet::print(raw_ostream &OS) const {
OS << " AliasSet[" << (const void*)this << ", " << RefCount << "] ";
OS << (Alias == SetMustAlias ? "must" : "may") << " alias, ";
switch (Access) {
case NoAccess: OS << "No access "; break;
case RefAccess: OS << "Ref "; break;
case ModAccess: OS << "Mod "; break;
case ModRefAccess: OS << "Mod/Ref "; break;
default: llvm_unreachable("Bad value for Access!");
}
if (isVolatile()) OS << "[volatile] ";
if (Forward)
OS << " forwarding to " << (void*)Forward;
if (!empty()) {
OS << "Pointers: ";
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I != begin()) OS << ", ";
I.getPointer()->printAsOperand(OS << "(");
OS << ", " << I.getSize() << ")";
}
}
if (!UnknownInsts.empty()) {
OS << "\n " << UnknownInsts.size() << " Unknown instructions: ";
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) {
if (i) OS << ", ";
UnknownInsts[i]->printAsOperand(OS);
}
}
OS << "\n";
}
void AliasSetTracker::print(raw_ostream &OS) const {
OS << "Alias Set Tracker: " << AliasSets.size() << " alias sets for "
<< PointerMap.size() << " pointer values.\n";
for (const_iterator I = begin(), E = end(); I != E; ++I)
I->print(OS);
OS << "\n";
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void AliasSet::dump() const { print(dbgs()); }
void AliasSetTracker::dump() const { print(dbgs()); }
#endif
//===----------------------------------------------------------------------===//
// ASTCallbackVH Class Implementation
//===----------------------------------------------------------------------===//
void AliasSetTracker::ASTCallbackVH::deleted() {
assert(AST && "ASTCallbackVH called with a null AliasSetTracker!");
AST->deleteValue(getValPtr());
// this now dangles!
}
void AliasSetTracker::ASTCallbackVH::allUsesReplacedWith(Value *V) {
AST->copyValue(getValPtr(), V);
}
AliasSetTracker::ASTCallbackVH::ASTCallbackVH(Value *V, AliasSetTracker *ast)
: CallbackVH(V), AST(ast) {}
AliasSetTracker::ASTCallbackVH &
AliasSetTracker::ASTCallbackVH::operator=(Value *V) {
return *this = ASTCallbackVH(V, AST);
}
//===----------------------------------------------------------------------===//
// AliasSetPrinter Pass
//===----------------------------------------------------------------------===//
namespace {
class AliasSetPrinter : public FunctionPass {
AliasSetTracker *Tracker;
public:
static char ID; // Pass identification, replacement for typeid
AliasSetPrinter() : FunctionPass(ID) {
initializeAliasSetPrinterPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<AliasAnalysis>();
}
bool runOnFunction(Function &F) override {
Tracker = new AliasSetTracker(getAnalysis<AliasAnalysis>());
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
Tracker->add(&*I);
Tracker->print(errs());
delete Tracker;
return false;
}
};
}
char AliasSetPrinter::ID = 0;
INITIALIZE_PASS_BEGIN(AliasSetPrinter, "print-alias-sets",
"Alias Set Printer", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(AliasSetPrinter, "print-alias-sets",
"Alias Set Printer", false, true)
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/StratifiedSets.h | //===- StratifiedSets.h - Abstract stratified sets implementation. --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_STRATIFIEDSETS_H
#define LLVM_ADT_STRATIFIEDSETS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include <bitset>
#include <cassert>
#include <cmath>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>
namespace llvm {
// \brief An index into Stratified Sets.
typedef unsigned StratifiedIndex;
// NOTE: ^ This can't be a short -- bootstrapping clang has a case where
// ~1M sets exist.
// \brief Container of information related to a value in a StratifiedSet.
struct StratifiedInfo {
StratifiedIndex Index;
// For field sensitivity, etc. we can tack attributes on to this struct.
};
// The number of attributes that StratifiedAttrs should contain. Attributes are
// described below, and 32 was an arbitrary choice because it fits nicely in 32
// bits (because we use a bitset for StratifiedAttrs).
static const unsigned NumStratifiedAttrs = 32;
// These are attributes that the users of StratifiedSets/StratifiedSetBuilders
// may use for various purposes. These also have the special property that
// they are merged down. So, if set A is above set B, and one decides to set an
// attribute in set A, then the attribute will automatically be set in set B.
typedef std::bitset<NumStratifiedAttrs> StratifiedAttrs;
// \brief A "link" between two StratifiedSets.
struct StratifiedLink {
// \brief This is a value used to signify "does not exist" where
// the StratifiedIndex type is used. This is used instead of
// Optional<StratifiedIndex> because Optional<StratifiedIndex> would
// eat up a considerable amount of extra memory, after struct
// padding/alignment is taken into account.
static const StratifiedIndex SetSentinel;
// \brief The index for the set "above" current
StratifiedIndex Above;
// \brief The link for the set "below" current
StratifiedIndex Below;
// \brief Attributes for these StratifiedSets.
StratifiedAttrs Attrs;
StratifiedLink() : Above(SetSentinel), Below(SetSentinel) {}
bool hasBelow() const { return Below != SetSentinel; }
bool hasAbove() const { return Above != SetSentinel; }
void clearBelow() { Below = SetSentinel; }
void clearAbove() { Above = SetSentinel; }
};
// \brief These are stratified sets, as described in "Fast algorithms for
// Dyck-CFL-reachability with applications to Alias Analysis" by Zhang Q, Lyu M
// R, Yuan H, and Su Z. -- in short, this is meant to represent different sets
// of Value*s. If two Value*s are in the same set, or if both sets have
// overlapping attributes, then the Value*s are said to alias.
//
// Sets may be related by position, meaning that one set may be considered as
// above or below another. In CFL Alias Analysis, this gives us an indication
// of how two variables are related; if the set of variable A is below a set
// containing variable B, then at some point, a variable that has interacted
// with B (or B itself) was either used in order to extract the variable A, or
// was used as storage of variable A.
//
// Sets may also have attributes (as noted above). These attributes are
// generally used for noting whether a variable in the set has interacted with
// a variable whose origins we don't quite know (i.e. globals/arguments), or if
// the variable may have had operations performed on it (modified in a function
// call). All attributes that exist in a set A must exist in all sets marked as
// below set A.
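//
// A minimal query sketch (illustrative client code; assumes `Sets` was
// produced elsewhere by a StratifiedSetsBuilder):
//
//   Optional<StratifiedInfo> A = Sets.find(V1), B = Sets.find(V2);
//   if (A.hasValue() && B.hasValue() && A->Index == B->Index)
//     ...; // V1 and V2 landed in the same set, so they may alias.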
template <typename T> class StratifiedSets {
public:
StratifiedSets() {}
StratifiedSets(DenseMap<T, StratifiedInfo> Map,
std::vector<StratifiedLink> Links)
: Values(std::move(Map)), Links(std::move(Links)) {}
StratifiedSets(StratifiedSets<T> &&Other) { *this = std::move(Other); }
StratifiedSets &operator=(StratifiedSets<T> &&Other) {
Values = std::move(Other.Values);
Links = std::move(Other.Links);
return *this;
}
Optional<StratifiedInfo> find(const T &Elem) const {
auto Iter = Values.find(Elem);
if (Iter == Values.end()) {
return NoneType();
}
return Iter->second;
}
const StratifiedLink &getLink(StratifiedIndex Index) const {
assert(inbounds(Index));
return Links[Index];
}
private:
DenseMap<T, StratifiedInfo> Values;
std::vector<StratifiedLink> Links;
bool inbounds(StratifiedIndex Idx) const { return Idx < Links.size(); }
};
// \brief Generic Builder class that produces StratifiedSets instances.
//
// The goal of this builder is to efficiently produce correct StratifiedSets
// instances. To this end, we use a few tricks:
// > Set chains (A method for linking sets together)
// > Set remaps (A method for marking a set as an alias [irony?] of another)
//
// ==== Set chains ====
// This builder has a notion of some value A being above, below, or with some
// other value B:
// > The `A above B` relationship implies that there is a reference edge going
// from A to B. Namely, it notes that A can store anything in B's set.
// > The `A below B` relationship is the opposite of `A above B`. It implies
// that there's a dereference edge going from A to B.
// > The `A with B` relationship states that there's an assignment edge going
// from A to B, and that A and B should be treated as equals.
//
// As an example, take the following code snippet:
//
// %a = alloca i32, align 4
// %ap = alloca i32*, align 8
// %app = alloca i32**, align 8
// store %a, %ap
// store %ap, %app
// %aw = getelementptr %ap, 0
//
// Given this, the follow relations exist:
// - %a below %ap & %ap above %a
// - %ap below %app & %app above %ap
// - %aw with %ap & %ap with %aw
//
// These relations produce the following sets:
// [{%a}, {%ap, %aw}, {%app}]
//
// ...Which states that the only MayAlias relationship in the above program is
// between %ap and %aw.
//
// Life gets more complicated when we actually have logic in our programs. So,
// we must either remove this logic from our programs, or make concessions for
// it in our AA algorithms. In this case, we have decided to select the latter
// option.
//
// First complication: Conditionals
// Motivation:
// %ad = alloca int, align 4
// %a = alloca int*, align 8
// %b = alloca int*, align 8
// %bp = alloca int**, align 8
// %c = call i1 @SomeFunc()
// %k = select %c, %a, %b
// store %ad, %a
// store %b, %bp
//
// %k has 'with' edges to both %a and %b, which ordinarily would not be linked
// together. So, we merge the set that contains %a with the set that contains
// %b. We then recursively merge the set above %a with the set above %b, and
// the set below %a with the set below %b, etc. Ultimately, the sets for this
// program would end up like: {%ad}, {%a, %b, %k}, {%bp}, where {%ad} is below
// {%a, %b, %k}, which in turn is below {%bp}.
//
// Second complication: Arbitrary casts
// Motivation:
// %ip = alloca int*, align 8
// %ipp = alloca int**, align 8
// %i = bitcast %ipp to int
// store %ip, %ipp
// store %i, %ip
//
// This is impossible to construct with any of the rules above, because a set
// containing both {%i, %ipp} is supposed to exist, the set with %i is supposed
// to be below the set with %ip, and the set with %ip is supposed to be below
// the set with %ipp. Because we don't allow circular relationships like this,
// we merge all concerned sets into one. So, the above code would generate a
// single StratifiedSet: {%ip, %ipp, %i}.
//
// ==== Set remaps ====
// More of an implementation detail than anything -- when merging sets, we need
// to update the numbers of all of the elements mapped to those sets. Rather
// than doing this at each merge, we note in the BuilderLink structure that a
// remap has occurred, and use this information so we can defer renumbering set
// elements until build time.
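//
// A minimal build sketch (illustrative client code; the Value*s are
// placeholders):
//
//   StratifiedSetsBuilder<Value *> Builder;
//   Builder.add(PtrA);              // new, unlinked set for PtrA
//   Builder.addBelow(PtrA, Loaded); // Loaded sits "below" PtrA
//   Builder.addWith(PtrA, Alias);   // Alias joins PtrA's set
//   StratifiedSets<Value *> Sets = Builder.build();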
template <typename T> class StratifiedSetsBuilder {
// \brief Represents a Stratified Set, with information about the Stratified
// Set above it, the set below it, and whether the current set has been
// remapped to another.
struct BuilderLink {
const StratifiedIndex Number;
BuilderLink(StratifiedIndex N) : Number(N) {
Remap = StratifiedLink::SetSentinel;
}
bool hasAbove() const {
assert(!isRemapped());
return Link.hasAbove();
}
bool hasBelow() const {
assert(!isRemapped());
return Link.hasBelow();
}
void setBelow(StratifiedIndex I) {
assert(!isRemapped());
Link.Below = I;
}
void setAbove(StratifiedIndex I) {
assert(!isRemapped());
Link.Above = I;
}
void clearBelow() {
assert(!isRemapped());
Link.clearBelow();
}
void clearAbove() {
assert(!isRemapped());
Link.clearAbove();
}
StratifiedIndex getBelow() const {
assert(!isRemapped());
assert(hasBelow());
return Link.Below;
}
StratifiedIndex getAbove() const {
assert(!isRemapped());
assert(hasAbove());
return Link.Above;
}
StratifiedAttrs &getAttrs() {
assert(!isRemapped());
return Link.Attrs;
}
void setAttr(unsigned index) {
assert(!isRemapped());
assert(index < NumStratifiedAttrs);
Link.Attrs.set(index);
}
void setAttrs(const StratifiedAttrs &other) {
assert(!isRemapped());
Link.Attrs |= other;
}
bool isRemapped() const { return Remap != StratifiedLink::SetSentinel; }
// \brief For initial remapping to another set
void remapTo(StratifiedIndex Other) {
assert(!isRemapped());
Remap = Other;
}
StratifiedIndex getRemapIndex() const {
assert(isRemapped());
return Remap;
}
// \brief Should only be called when we're already remapped.
void updateRemap(StratifiedIndex Other) {
assert(isRemapped());
Remap = Other;
}
// \brief Prefer the above functions to calling things directly on what's
// returned from this -- they guard against unexpected calls when the
// current BuilderLink is remapped.
const StratifiedLink &getLink() const { return Link; }
private:
StratifiedLink Link;
StratifiedIndex Remap;
};
// \brief This function performs all of the set unioning/value renumbering
// that we've been putting off, and generates a vector<StratifiedLink> that
// may be placed in a StratifiedSets instance.
void finalizeSets(std::vector<StratifiedLink> &StratLinks) {
DenseMap<StratifiedIndex, StratifiedIndex> Remaps;
for (auto &Link : Links) {
if (Link.isRemapped()) {
continue;
}
StratifiedIndex Number = StratLinks.size();
Remaps.insert(std::make_pair(Link.Number, Number));
StratLinks.push_back(Link.getLink());
}
for (auto &Link : StratLinks) {
if (Link.hasAbove()) {
auto &Above = linksAt(Link.Above);
auto Iter = Remaps.find(Above.Number);
assert(Iter != Remaps.end());
Link.Above = Iter->second;
}
if (Link.hasBelow()) {
auto &Below = linksAt(Link.Below);
auto Iter = Remaps.find(Below.Number);
assert(Iter != Remaps.end());
Link.Below = Iter->second;
}
}
for (auto &Pair : Values) {
auto &Info = Pair.second;
auto &Link = linksAt(Info.Index);
auto Iter = Remaps.find(Link.Number);
assert(Iter != Remaps.end());
Info.Index = Iter->second;
}
}
// \brief There's a guarantee in StratifiedLink that all attribute bits set in
// a link's Attrs will also be set in the Attrs of every link "below" it.
static void propagateAttrs(std::vector<StratifiedLink> &Links) {
const auto getHighestParentAbove = [&Links](StratifiedIndex Idx) {
const auto *Link = &Links[Idx];
while (Link->hasAbove()) {
Idx = Link->Above;
Link = &Links[Idx];
}
return Idx;
};
SmallSet<StratifiedIndex, 16> Visited;
for (unsigned I = 0, E = Links.size(); I < E; ++I) {
auto CurrentIndex = getHighestParentAbove(I);
if (!Visited.insert(CurrentIndex).second) {
continue;
}
while (Links[CurrentIndex].hasBelow()) {
auto &CurrentBits = Links[CurrentIndex].Attrs;
auto NextIndex = Links[CurrentIndex].Below;
auto &NextBits = Links[NextIndex].Attrs;
NextBits |= CurrentBits;
CurrentIndex = NextIndex;
}
}
}
public:
// \brief Builds a StratifiedSet from the information we've been given since
// either construction or the prior build() call.
StratifiedSets<T> build() {
std::vector<StratifiedLink> StratLinks;
finalizeSets(StratLinks);
propagateAttrs(StratLinks);
Links.clear();
return StratifiedSets<T>(std::move(Values), std::move(StratLinks));
}
std::size_t size() const { return Values.size(); }
std::size_t numSets() const { return Links.size(); }
bool has(const T &Elem) const { return get(Elem).hasValue(); }
bool add(const T &Main) {
if (get(Main).hasValue())
return false;
auto NewIndex = getNewUnlinkedIndex();
return addAtMerging(Main, NewIndex);
}
// \brief Restructures the stratified sets as necessary to make "ToAdd" in a
// set above "Main". There are some cases where this is not possible (see
// above), so we merge them such that ToAdd and Main are in the same set.
bool addAbove(const T &Main, const T &ToAdd) {
assert(has(Main));
auto Index = *indexOf(Main);
if (!linksAt(Index).hasAbove())
addLinkAbove(Index);
auto Above = linksAt(Index).getAbove();
return addAtMerging(ToAdd, Above);
}
// \brief Restructures the stratified sets as necessary to make "ToAdd" in a
// set below "Main". There are some cases where this is not possible (see
// above), so we merge them such that ToAdd and Main are in the same set.
bool addBelow(const T &Main, const T &ToAdd) {
assert(has(Main));
auto Index = *indexOf(Main);
if (!linksAt(Index).hasBelow())
addLinkBelow(Index);
auto Below = linksAt(Index).getBelow();
return addAtMerging(ToAdd, Below);
}
bool addWith(const T &Main, const T &ToAdd) {
assert(has(Main));
auto MainIndex = *indexOf(Main);
return addAtMerging(ToAdd, MainIndex);
}
void noteAttribute(const T &Main, unsigned AttrNum) {
assert(has(Main));
assert(AttrNum < NumStratifiedAttrs);
auto *Info = *get(Main);
auto &Link = linksAt(Info->Index);
Link.setAttr(AttrNum);
}
void noteAttributes(const T &Main, const StratifiedAttrs &NewAttrs) {
assert(has(Main));
auto *Info = *get(Main);
auto &Link = linksAt(Info->Index);
Link.setAttrs(NewAttrs);
}
StratifiedAttrs getAttributes(const T &Main) {
assert(has(Main));
auto *Info = *get(Main);
auto *Link = &linksAt(Info->Index);
auto Attrs = Link->getAttrs();
while (Link->hasAbove()) {
Link = &linksAt(Link->getAbove());
Attrs |= Link->getAttrs();
}
return Attrs;
}
bool getAttribute(const T &Main, unsigned AttrNum) {
assert(AttrNum < NumStratifiedAttrs);
auto Attrs = getAttributes(Main);
return Attrs[AttrNum];
}
// \brief Gets the attributes that have been applied to the set that Main
// belongs to. It ignores attributes in any sets above the one that Main
// resides in.
StratifiedAttrs getRawAttributes(const T &Main) {
assert(has(Main));
auto *Info = *get(Main);
auto &Link = linksAt(Info->Index);
return Link.getAttrs();
}
// \brief Gets an attribute from the attributes that have been applied to the
// set that Main belongs to. It ignores attributes in any sets above the one
// that Main resides in.
bool getRawAttribute(const T &Main, unsigned AttrNum) {
assert(AttrNum < NumStratifiedAttrs);
auto Attrs = getRawAttributes(Main);
return Attrs[AttrNum];
}
private:
DenseMap<T, StratifiedInfo> Values;
std::vector<BuilderLink> Links;
// \brief Adds the given element at the given index, merging sets if
// necessary.
bool addAtMerging(const T &ToAdd, StratifiedIndex Index) {
StratifiedInfo Info = {Index};
auto Pair = Values.insert(std::make_pair(ToAdd, Info));
if (Pair.second)
return true;
auto &Iter = Pair.first;
auto &IterSet = linksAt(Iter->second.Index);
auto &ReqSet = linksAt(Index);
// Failed to add where we wanted to. Merge the sets.
if (&IterSet != &ReqSet)
merge(IterSet.Number, ReqSet.Number);
return false;
}
// \brief Gets the BuilderLink at the given index, taking set remapping into
// account.
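// Chains of remaps are flattened as they are traversed (union-find-style
// path compression), so repeated lookups stay cheap.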
BuilderLink &linksAt(StratifiedIndex Index) {
auto *Start = &Links[Index];
if (!Start->isRemapped())
return *Start;
auto *Current = Start;
while (Current->isRemapped())
Current = &Links[Current->getRemapIndex()];
auto NewRemap = Current->Number;
// Run through everything that has yet to be updated, and update them to
// remap to NewRemap
Current = Start;
while (Current->isRemapped()) {
auto *Next = &Links[Current->getRemapIndex()];
Current->updateRemap(NewRemap);
Current = Next;
}
return *Current;
}
// \brief Merges two sets into one another. Assumes that these sets are not
// already one and the same.
void merge(StratifiedIndex Idx1, StratifiedIndex Idx2) {
assert(inbounds(Idx1) && inbounds(Idx2));
assert(&linksAt(Idx1) != &linksAt(Idx2) &&
"Merging a set into itself is not allowed");
// CASE 1: If the set at `Idx1` is above or below `Idx2`, we need to merge
// both the given sets, and all sets between them, into one.
if (tryMergeUpwards(Idx1, Idx2))
return;
if (tryMergeUpwards(Idx2, Idx1))
return;
// CASE 2: The set at `Idx1` is not in the same chain as the set at `Idx2`.
// We therefore need to merge the two chains together.
mergeDirect(Idx1, Idx2);
}
// \brief Merges two sets assuming that the set at `Idx1` is unreachable from
// traversing above or below the set at `Idx2`.
void mergeDirect(StratifiedIndex Idx1, StratifiedIndex Idx2) {
assert(inbounds(Idx1) && inbounds(Idx2));
auto *LinksInto = &linksAt(Idx1);
auto *LinksFrom = &linksAt(Idx2);
// Merging everything above LinksInto then proceeding to merge everything
// below LinksInto becomes problematic, so we go as far "up" as possible!
while (LinksInto->hasAbove() && LinksFrom->hasAbove()) {
LinksInto = &linksAt(LinksInto->getAbove());
LinksFrom = &linksAt(LinksFrom->getAbove());
}
if (LinksFrom->hasAbove()) {
LinksInto->setAbove(LinksFrom->getAbove());
auto &NewAbove = linksAt(LinksInto->getAbove());
NewAbove.setBelow(LinksInto->Number);
}
// Merging strategy:
// > If neither has links below, stop.
// > If only `LinksInto` has links below, stop.
// > If only `LinksFrom` has links below, reset `LinksInto.Below` to
// match `LinksFrom.Below`
// > If both have links below, deal with those next.
while (LinksInto->hasBelow() && LinksFrom->hasBelow()) {
auto &FromAttrs = LinksFrom->getAttrs();
LinksInto->setAttrs(FromAttrs);
// Remap needs to happen after getBelow(), but before
// assignment of LinksFrom
auto *NewLinksFrom = &linksAt(LinksFrom->getBelow());
LinksFrom->remapTo(LinksInto->Number);
LinksFrom = NewLinksFrom;
LinksInto = &linksAt(LinksInto->getBelow());
}
if (LinksFrom->hasBelow()) {
LinksInto->setBelow(LinksFrom->getBelow());
auto &NewBelow = linksAt(LinksInto->getBelow());
NewBelow.setAbove(LinksInto->Number);
}
LinksFrom->remapTo(LinksInto->Number);
}
// \brief Checks to see if lowerIndex is at a level lower than upperIndex.
// If so, it will merge lowerIndex with upperIndex (and all of the sets
// between) and return true. Otherwise, it will return false.
bool tryMergeUpwards(StratifiedIndex LowerIndex, StratifiedIndex UpperIndex) {
assert(inbounds(LowerIndex) && inbounds(UpperIndex));
auto *Lower = &linksAt(LowerIndex);
auto *Upper = &linksAt(UpperIndex);
if (Lower == Upper)
return true;
SmallVector<BuilderLink *, 8> Found;
auto *Current = Lower;
auto Attrs = Current->getAttrs();
while (Current->hasAbove() && Current != Upper) {
Found.push_back(Current);
Attrs |= Current->getAttrs();
Current = &linksAt(Current->getAbove());
}
if (Current != Upper)
return false;
Upper->setAttrs(Attrs);
if (Lower->hasBelow()) {
auto NewBelowIndex = Lower->getBelow();
Upper->setBelow(NewBelowIndex);
auto &NewBelow = linksAt(NewBelowIndex);
NewBelow.setAbove(UpperIndex);
} else {
Upper->clearBelow();
}
for (const auto &Ptr : Found)
Ptr->remapTo(Upper->Number);
return true;
}
Optional<const StratifiedInfo *> get(const T &Val) const {
auto Result = Values.find(Val);
if (Result == Values.end())
return NoneType();
return &Result->second;
}
Optional<StratifiedInfo *> get(const T &Val) {
auto Result = Values.find(Val);
if (Result == Values.end())
return NoneType();
return &Result->second;
}
Optional<StratifiedIndex> indexOf(const T &Val) {
auto MaybeVal = get(Val);
if (!MaybeVal.hasValue())
return NoneType();
auto *Info = *MaybeVal;
auto &Link = linksAt(Info->Index);
return Link.Number;
}
StratifiedIndex addLinkBelow(StratifiedIndex Set) {
auto At = addLinks();
Links[Set].setBelow(At);
Links[At].setAbove(Set);
return At;
}
StratifiedIndex addLinkAbove(StratifiedIndex Set) {
auto At = addLinks();
Links[At].setBelow(Set);
Links[Set].setAbove(At);
return At;
}
StratifiedIndex getNewUnlinkedIndex() { return addLinks(); }
StratifiedIndex addLinks() {
auto Link = Links.size();
Links.push_back(BuilderLink(Link));
return Link;
}
bool inbounds(StratifiedIndex N) const { return N < Links.size(); }
};
}
#endif // LLVM_ADT_STRATIFIEDSETS_H
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/AssumptionCache.cpp | //===- AssumptionCache.cpp - Cache finding @llvm.assume calls -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that keeps track of @llvm.assume intrinsics in
// the functions of a module.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
using namespace llvm::PatternMatch;
void AssumptionCache::scanFunction() {
assert(!Scanned && "Tried to scan the function twice!");
assert(AssumeHandles.empty() && "Already have assumes when scanning!");
// Go through all instructions in all blocks, add all calls to @llvm.assume
// to this cache.
for (BasicBlock &B : F)
for (Instruction &II : B)
if (match(&II, m_Intrinsic<Intrinsic::assume>()))
AssumeHandles.push_back(&II);
// Mark the scan as complete.
Scanned = true;
}
void AssumptionCache::registerAssumption(CallInst *CI) {
assert(match(CI, m_Intrinsic<Intrinsic::assume>()) &&
"Registered call does not call @llvm.assume");
// If we haven't scanned the function yet, just drop this assumption. It will
// be found when we scan later.
if (!Scanned)
return;
AssumeHandles.push_back(CI);
#ifndef NDEBUG
assert(CI->getParent() &&
"Cannot register @llvm.assume call not in a basic block");
assert(&F == CI->getParent()->getParent() &&
"Cannot register @llvm.assume call not in this function");
// We expect the number of assumptions to be small, so in an asserts build
// check that we don't accumulate duplicates and that all assumptions point
// to the same function.
SmallPtrSet<Value *, 16> AssumptionSet;
for (auto &VH : AssumeHandles) {
if (!VH)
continue;
assert(&F == cast<Instruction>(VH)->getParent()->getParent() &&
"Cached assumption not inside this function!");
assert(match(cast<CallInst>(VH), m_Intrinsic<Intrinsic::assume>()) &&
"Cached something other than a call to @llvm.assume!");
assert(AssumptionSet.insert(VH).second &&
"Cache contains multiple copies of a call!");
}
#endif
}
char AssumptionAnalysis::PassID;
PreservedAnalyses AssumptionPrinterPass::run(Function &F,
AnalysisManager<Function> *AM) {
AssumptionCache &AC = AM->getResult<AssumptionAnalysis>(F);
OS << "Cached assumptions for function: " << F.getName() << "\n";
for (auto &VH : AC.assumptions())
if (VH)
OS << " " << *cast<CallInst>(VH)->getArgOperand(0) << "\n";
return PreservedAnalyses::all();
}
void AssumptionCacheTracker::FunctionCallbackVH::deleted() {
auto I = ACT->AssumptionCaches.find_as(cast<Function>(getValPtr()));
if (I != ACT->AssumptionCaches.end())
ACT->AssumptionCaches.erase(I);
// 'this' now dangles!
}
AssumptionCache &AssumptionCacheTracker::getAssumptionCache(Function &F) {
// We probe the function map twice to try and avoid creating a value handle
// around the function in common cases. This makes insertion a bit slower,
// but if we have to insert we're going to scan the whole function so that
// shouldn't matter.
auto I = AssumptionCaches.find_as(&F);
if (I != AssumptionCaches.end())
return *I->second;
// Ok, build a new cache by scanning the function, insert it and the value
// handle into our map, and return the newly populated cache.
auto IP = AssumptionCaches.insert(std::make_pair(
FunctionCallbackVH(&F, this), llvm::make_unique<AssumptionCache>(F)));
assert(IP.second && "Scanning function already in the map?");
return *IP.first->second;
}
void AssumptionCacheTracker::verifyAnalysis() const {
#ifndef NDEBUG
SmallPtrSet<const CallInst *, 4> AssumptionSet;
for (const auto &I : AssumptionCaches) {
for (auto &VH : I.second->assumptions())
if (VH)
AssumptionSet.insert(cast<CallInst>(VH));
for (const BasicBlock &B : cast<Function>(*I.first))
for (const Instruction &II : B)
if (match(&II, m_Intrinsic<Intrinsic::assume>()))
assert(AssumptionSet.count(cast<CallInst>(&II)) &&
"Assumption in scanned function not in cache");
}
#endif
}
AssumptionCacheTracker::AssumptionCacheTracker() : ImmutablePass(ID) {
initializeAssumptionCacheTrackerPass(*PassRegistry::getPassRegistry());
}
AssumptionCacheTracker::~AssumptionCacheTracker() {}
INITIALIZE_PASS(AssumptionCacheTracker, "assumption-cache-tracker",
"Assumption Cache Tracker", false, true)
char AssumptionCacheTracker::ID = 0;
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/DivergenceAnalysis.cpp | //===- DivergenceAnalysis.cpp --------- Divergence Analysis Implementation -==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements divergence analysis which determines whether a branch
// in a GPU program is divergent. It can help branch optimizations such as jump
// threading and loop unswitching to make better decisions.
//
// GPU programs typically use the SIMD execution model, where multiple threads
// in the same execution group have to execute in lock-step. Therefore, if the
// code contains divergent branches (i.e., threads in a group do not agree on
// which path of the branch to take), the group of threads has to execute all
// the paths from that branch with different subsets of threads enabled until
// they converge at the immediately post-dominating BB of the paths.
//
// Due to this execution model, some optimizations such as jump
// threading and loop unswitching can unfortunately be harmful when performed on
// divergent branches. Therefore, an analysis that computes which branches in a
// GPU program are divergent can help the compiler to selectively run these
// optimizations.
//
// This file defines divergence analysis which computes a conservative but
// non-trivial approximation of all divergent branches in a GPU program. It
// partially implements the approach described in
//
// Divergence Analysis
// Sampaio, Souza, Collange, Pereira
// TOPLAS '13
//
// The divergence analysis identifies the sources of divergence (e.g., special
// variables that hold the thread ID), and recursively marks variables that are
// data or sync dependent on a source of divergence as divergent.
//
// While data dependency is a well-known concept, the notion of sync dependency
// is worth more explanation. Sync dependence characterizes the control flow
// aspect of the propagation of branch divergence. For example,
//
// %cond = icmp slt i32 %tid, 10
// br i1 %cond, label %then, label %else
// then:
// br label %merge
// else:
// br label %merge
// merge:
// %a = phi i32 [ 0, %then ], [ 1, %else ]
//
// Suppose %tid holds the thread ID. Although %a is not data dependent on %tid
// because %tid is not on its use-def chains, %a is sync dependent on %tid
// because the branch "br i1 %cond" depends on %tid and affects which value %a
// is assigned to.
//
// The current implementation has the following limitations:
// 1. intra-procedural. It conservatively considers the arguments of a
// non-kernel-entry function and the return value of a function call as
// divergent.
// 2. memory as black box. It conservatively considers values loaded from
// generic or local address as divergent. This can be improved by leveraging
// pointer analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <vector>
using namespace llvm;
namespace {
class DivergencePropagator {
public:
DivergencePropagator(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
PostDominatorTree &PDT, DenseSet<const Value *> &DV)
: F(F), TTI(TTI), DT(DT), PDT(PDT), DV(DV) {}
void populateWithSourcesOfDivergence();
void propagate();
private:
// A helper function that explores data dependents of V.
void exploreDataDependency(Value *V);
// A helper function that explores sync dependents of TI.
void exploreSyncDependency(TerminatorInst *TI);
// Computes the influence region from Start to End. This region includes all
// basic blocks on any simple path from Start to End.
void computeInfluenceRegion(BasicBlock *Start, BasicBlock *End,
DenseSet<BasicBlock *> &InfluenceRegion);
// Finds all users of I that are outside the influence region, and add these
// users to Worklist.
void findUsersOutsideInfluenceRegion(
Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion);
Function &F;
TargetTransformInfo &TTI;
DominatorTree &DT;
PostDominatorTree &PDT;
std::vector<Value *> Worklist; // Stack for DFS.
DenseSet<const Value *> &DV; // Stores all divergent values.
};
void DivergencePropagator::populateWithSourcesOfDivergence() {
Worklist.clear();
DV.clear();
for (auto &I : inst_range(F)) {
if (TTI.isSourceOfDivergence(&I)) {
Worklist.push_back(&I);
DV.insert(&I);
}
}
for (auto &Arg : F.args()) {
if (TTI.isSourceOfDivergence(&Arg)) {
Worklist.push_back(&Arg);
DV.insert(&Arg);
}
}
}
void DivergencePropagator::exploreSyncDependency(TerminatorInst *TI) {
// Propagation rule 1: if branch TI is divergent, all PHINodes in TI's
// immediate post dominator are divergent. This rule handles if-then-else
// patterns. For example,
//
// if (tid < 5)
// a1 = 1;
// else
// a2 = 2;
// a = phi(a1, a2); // sync dependent on (tid < 5)
BasicBlock *ThisBB = TI->getParent();
BasicBlock *IPostDom = PDT.getNode(ThisBB)->getIDom()->getBlock();
if (IPostDom == nullptr)
return;
for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
// A PHINode is uniform if it returns the same value no matter which path is
// taken.
if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(&*I).second)
Worklist.push_back(&*I);
}
// Propagation rule 2: if a value defined in a loop is used outside, the user
// is sync dependent on the condition of the loop exits that dominate the
// user. For example,
//
// int i = 0;
// do {
// i++;
// if (foo(i)) ... // uniform
// } while (i < tid);
// if (bar(i)) ... // divergent
//
// A program may contain unstructured loops. Therefore, we cannot leverage
// LoopInfo, which only recognizes natural loops.
//
// The algorithm used here handles both natural and unstructured loops. Given
// a branch TI, we first compute its influence region, the union of all simple
// paths from TI to its immediate post dominator (IPostDom). Then, we search
// for all the values defined in the influence region but used outside. All
// these users are sync dependent on TI.
DenseSet<BasicBlock *> InfluenceRegion;
computeInfluenceRegion(ThisBB, IPostDom, InfluenceRegion);
// An insight that can speed up the search process is that all the in-region
// values that are used outside must dominate TI. Therefore, instead of
// searching every basic blocks in the influence region, we search all the
// dominators of TI until it is outside the influence region.
BasicBlock *InfluencedBB = ThisBB;
while (InfluenceRegion.count(InfluencedBB)) {
for (auto &I : *InfluencedBB)
findUsersOutsideInfluenceRegion(I, InfluenceRegion);
DomTreeNode *IDomNode = DT.getNode(InfluencedBB)->getIDom();
if (IDomNode == nullptr)
break;
InfluencedBB = IDomNode->getBlock();
}
}
void DivergencePropagator::findUsersOutsideInfluenceRegion(
Instruction &I, const DenseSet<BasicBlock *> &InfluenceRegion) {
for (User *U : I.users()) {
Instruction *UserInst = cast<Instruction>(U);
if (!InfluenceRegion.count(UserInst->getParent())) {
if (DV.insert(UserInst).second)
Worklist.push_back(UserInst);
}
}
}
// A helper function for computeInfluenceRegion that adds successors of "ThisBB"
// to the influence region.
static void
addSuccessorsToInfluenceRegion(BasicBlock *ThisBB, BasicBlock *End,
DenseSet<BasicBlock *> &InfluenceRegion,
std::vector<BasicBlock *> &InfluenceStack) {
for (BasicBlock *Succ : successors(ThisBB)) {
if (Succ != End && InfluenceRegion.insert(Succ).second)
InfluenceStack.push_back(Succ);
}
}
void DivergencePropagator::computeInfluenceRegion(
BasicBlock *Start, BasicBlock *End,
DenseSet<BasicBlock *> &InfluenceRegion) {
assert(PDT.properlyDominates(End, Start) &&
"End does not properly dominate Start");
// The influence region starts from the end of "Start" to the beginning of
// "End". Therefore, "Start" should not be in the region unless "Start" is in
// a loop that doesn't contain "End".
std::vector<BasicBlock *> InfluenceStack;
addSuccessorsToInfluenceRegion(Start, End, InfluenceRegion, InfluenceStack);
while (!InfluenceStack.empty()) {
BasicBlock *BB = InfluenceStack.back();
InfluenceStack.pop_back();
addSuccessorsToInfluenceRegion(BB, End, InfluenceRegion, InfluenceStack);
}
}
void DivergencePropagator::exploreDataDependency(Value *V) {
// Follow def-use chains of V.
for (User *U : V->users()) {
Instruction *UserInst = cast<Instruction>(U);
if (DV.insert(UserInst).second)
Worklist.push_back(UserInst);
}
}
void DivergencePropagator::propagate() {
// Traverse the dependency graph using DFS.
while (!Worklist.empty()) {
Value *V = Worklist.back();
Worklist.pop_back();
if (TerminatorInst *TI = dyn_cast<TerminatorInst>(V)) {
// Terminators with less than two successors won't introduce sync
// dependency. Ignore them.
if (TI->getNumSuccessors() > 1)
exploreSyncDependency(TI);
}
exploreDataDependency(V);
}
}
} // end anonymous namespace
// Register this pass.
char DivergenceAnalysis::ID = 0;
INITIALIZE_PASS_BEGIN(DivergenceAnalysis, "divergence", "Divergence Analysis",
false, true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTree)
INITIALIZE_PASS_END(DivergenceAnalysis, "divergence", "Divergence Analysis",
false, true)
FunctionPass *llvm::createDivergenceAnalysisPass() {
return new DivergenceAnalysis();
}
void DivergenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTreeWrapperPass>();
AU.addRequired<PostDominatorTree>();
AU.setPreservesAll();
}
bool DivergenceAnalysis::runOnFunction(Function &F) {
auto *TTIWP = getAnalysisIfAvailable<TargetTransformInfoWrapperPass>();
if (TTIWP == nullptr)
return false;
TargetTransformInfo &TTI = TTIWP->getTTI(F);
// Fast path: if the target does not have branch divergence, we do not mark
// any branch as divergent.
if (!TTI.hasBranchDivergence())
return false;
DivergentValues.clear();
DivergencePropagator DP(F, TTI,
getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
getAnalysis<PostDominatorTree>(), DivergentValues);
DP.populateWithSourcesOfDivergence();
DP.propagate();
return false;
}
void DivergenceAnalysis::print(raw_ostream &OS, const Module *) const {
if (DivergentValues.empty())
return;
const Value *FirstDivergentValue = *DivergentValues.begin();
const Function *F;
if (const Argument *Arg = dyn_cast<Argument>(FirstDivergentValue)) {
F = Arg->getParent();
} else if (const Instruction *I =
dyn_cast<Instruction>(FirstDivergentValue)) {
F = I->getParent()->getParent();
} else {
llvm_unreachable("Only arguments and instructions can be divergent");
}
// Dumps all divergent values in F, arguments and then instructions.
for (auto &Arg : F->args()) {
if (DivergentValues.count(&Arg))
OS << "DIVERGENT: " << Arg << "\n";
}
// Iterate instructions using inst_range to ensure a deterministic order.
for (auto &I : inst_range(F)) {
if (DivergentValues.count(&I))
OS << "DIVERGENT:" << I << "\n";
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/ScalarEvolutionExpander.cpp | //===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace PatternMatch;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
Instruction::CastOps Op,
BasicBlock::iterator IP) {
// This function must be called with the builder having a valid insertion
// point. It doesn't need to be the actual IP where the uses of the returned
// cast will be added, but it must dominate such IP.
// We use this precondition to produce a cast that will dominate all its
// uses. In particular, this is crucial for the case where the builder's
// insertion point *is* the point where we were asked to put the cast.
// Since we don't know the builder's insertion point is actually
// where the uses will be added (only that it dominates it), we are
// not allowed to move it.
BasicBlock::iterator BIP = Builder.GetInsertPoint();
Instruction *Ret = nullptr;
// Check to see if there is already a cast!
for (User *U : V->users())
if (U->getType() == Ty)
if (CastInst *CI = dyn_cast<CastInst>(U))
if (CI->getOpcode() == Op) {
// If the cast isn't where we want it, create a new cast at IP.
// Likewise, do not reuse a cast at BIP because it must dominate
// instructions that might be inserted before BIP.
if (BasicBlock::iterator(CI) != IP || BIP == IP) {
// Create a new cast, and leave the old cast in place in case
// it is being used as an insert point. Clear its operand
// so that it doesn't hold anything live.
Ret = CastInst::Create(Op, V, Ty, "", IP);
Ret->takeName(CI);
CI->replaceAllUsesWith(Ret);
CI->setOperand(0, UndefValue::get(V->getType()));
break;
}
Ret = CI;
break;
}
// Create a new cast.
if (!Ret)
Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
// We assert at the end of the function since IP might point to an
// instruction with different dominance properties than a cast
// (an invoke for example) and not dominate BIP (but the cast does).
assert(SE.DT->dominates(Ret, BIP));
rememberInstruction(Ret);
return Ret;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
assert((Op == Instruction::BitCast ||
Op == Instruction::PtrToInt ||
Op == Instruction::IntToPtr) &&
"InsertNoopCastOfTo cannot perform non-noop casts!");
assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
"InsertNoopCastOfTo cannot change sizes!");
// Short-circuit unnecessary bitcasts.
if (Op == Instruction::BitCast) {
if (V->getType() == Ty)
return V;
if (CastInst *CI = dyn_cast<CastInst>(V)) {
if (CI->getOperand(0)->getType() == Ty)
return CI->getOperand(0);
}
}
// Short-circuit unnecessary inttoptr<->ptrtoint casts.
if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
if (CastInst *CI = dyn_cast<CastInst>(V))
if ((CI->getOpcode() == Instruction::PtrToInt ||
CI->getOpcode() == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(CI->getType()) ==
SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
return CI->getOperand(0);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
if ((CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::IntToPtr) &&
SE.getTypeSizeInBits(CE->getType()) ==
SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
return CE->getOperand(0);
}
// Fold a cast of a constant.
if (Constant *C = dyn_cast<Constant>(V))
return ConstantExpr::getCast(Op, C, Ty);
// Cast the argument at the beginning of the entry block, after
// any bitcasts of other arguments.
if (Argument *A = dyn_cast<Argument>(V)) {
BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
while ((isa<BitCastInst>(IP) &&
isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
cast<BitCastInst>(IP)->getOperand(0) != A) ||
isa<DbgInfoIntrinsic>(IP) ||
isa<LandingPadInst>(IP))
++IP;
return ReuseOrCreateCast(A, Ty, Op, IP);
}
// Cast the instruction immediately after the instruction.
Instruction *I = cast<Instruction>(V);
BasicBlock::iterator IP = I; ++IP;
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
++IP;
return ReuseOrCreateCast(I, Ty, Op, IP);
}
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
Value *LHS, Value *RHS) {
// Fold a binop with constant operands.
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return ConstantExpr::get(Opcode, CLHS, CRHS);
// Do a quick scan to see if we have this binop nearby. If so, reuse it.
unsigned ScanLimit = 6;
BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
// Scanning starts from the last instruction before the insertion point.
BasicBlock::iterator IP = Builder.GetInsertPoint();
if (IP != BlockBegin) {
--IP;
for (; ScanLimit; --IP, --ScanLimit) {
// Don't count dbg.value against the ScanLimit, to avoid perturbing the
// generated code.
if (isa<DbgInfoIntrinsic>(IP))
ScanLimit++;
if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
IP->getOperand(1) == RHS)
return IP;
if (IP == BlockBegin) break;
}
}
// Save the original insertion point so we can restore it when we're done.
DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
BuilderType::InsertPointGuard Guard(Builder);
// Move the insertion point out of as many loops as we can.
while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
// Ok, move up a level.
Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
}
// If we haven't found this binop, insert it.
Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
BO->setDebugLoc(Loc);
rememberInstruction(BO);
return BO;
}
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
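/// For illustration: with S = {8,+,4}<L> and Factor = 4, both start and step
/// divide evenly, so S becomes {2,+,1}<L>; with S = 10 and Factor = 4, S
/// becomes 2 (10 sdiv 4) and Remainder accumulates 2 (10 srem 4).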
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
const SCEV *Factor, ScalarEvolution &SE,
const DataLayout &DL) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
// x/x == 1.
if (S == Factor) {
S = SE.getConstant(S->getType(), 1);
return true;
}
// For a Constant, check for a multiple of the given factor.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
// 0/x == 0.
if (C->isZero())
return true;
// Check for divisibility.
if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
ConstantInt *CI =
ConstantInt::get(SE.getContext(),
C->getValue()->getValue().sdiv(
FC->getValue()->getValue()));
// If the quotient is zero and the remainder is non-zero, reject
// the value at this scale. It will be considered for subsequent
// smaller scales.
if (!CI->isZero()) {
const SCEV *Div = SE.getConstant(CI);
S = Div;
Remainder =
SE.getAddExpr(Remainder,
SE.getConstant(C->getValue()->getValue().srem(
FC->getValue()->getValue())));
return true;
}
}
}
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The factor is known to be a constant at this point; check whether the
    // Mul's leading constant operand is a multiple of it. If so, factor it out.
const SCEVConstant *FC = cast<SCEVConstant>(Factor);
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[0] = SE.getConstant(
C->getValue()->getValue().sdiv(FC->getValue()->getValue()));
S = SE.getMulExpr(NewMulOps);
return true;
}
}
// In an AddRec, check if both start and step are divisible.
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
return false;
if (!StepRem->isZero())
return false;
const SCEV *Start = A->getStart();
if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
return false;
S = SE.getAddRecExpr(Start, Step, A->getLoop(),
A->getNoWrapFlags(SCEV::FlagNW));
return true;
}
return false;
}
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
Type *Ty,
ScalarEvolution &SE) {
unsigned NumAddRecs = 0;
for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
++NumAddRecs;
// Group Ops into non-addrecs and addrecs.
SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
// Let ScalarEvolution sort and simplify the non-addrecs list.
const SCEV *Sum = NoAddRecs.empty() ?
SE.getConstant(Ty, 0) :
SE.getAddExpr(NoAddRecs);
// If it returned an add, use the operands. Otherwise it simplified
// the sum into a single value, so just use that.
Ops.clear();
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
Ops.append(Add->op_begin(), Add->op_end());
else if (!Sum->isZero())
Ops.push_back(Sum);
// Then append the addrecs.
Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
Type *Ty,
ScalarEvolution &SE) {
// Find the addrecs.
SmallVector<const SCEV *, 8> AddRecs;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
const SCEV *Start = A->getStart();
if (Start->isZero()) break;
const SCEV *Zero = SE.getConstant(Ty, 0);
AddRecs.push_back(SE.getAddRecExpr(Zero,
A->getStepRecurrence(SE),
A->getLoop(),
A->getNoWrapFlags(SCEV::FlagNW)));
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
Ops[i] = Zero;
Ops.append(Add->op_begin(), Add->op_end());
e += Add->getNumOperands();
} else {
Ops[i] = Start;
}
}
if (!AddRecs.empty()) {
// Add the addrecs onto the end of the list.
Ops.append(AddRecs.begin(), AddRecs.end());
// Resort the operand list, moving any constants to the front.
SimplifyAddOperands(Ops, Ty, SE);
}
}
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
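/// As a rough sketch of the two shapes this can produce: when an operand
/// factors by the element size, the result looks like
///   %scevgep = getelementptr i32, i32* %base, i64 %i
/// and when nothing factors, the fallback casts the base to i8* and emits
///   %uglygep = getelementptr i8, i8* %base, i64 %byteoffset
/// (types here are illustrative).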
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
const SCEV *const *op_end,
PointerType *PTy,
Type *Ty,
Value *V) {
Type *OriginalElTy = PTy->getElementType();
Type *ElTy = OriginalElTy;
SmallVector<Value *, 4> GepIndices;
SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
// Split AddRecs up into parts as either of the parts may be usable
// without the other.
SplitAddRecs(Ops, Ty, SE);
Type *IntPtrTy = DL.getIntPtrType(PTy);
// Descend down the pointer's type and attempt to convert the other
// operands into GEP indices, at each level. The first index in a GEP
// indexes into the array implied by the pointer operand; the rest of
// the indices index into the element or field type selected by the
// preceding index.
for (;;) {
// If the scale size is not 0, attempt to factor out a scale for
// array indexing.
SmallVector<const SCEV *, 8> ScaledOps;
if (ElTy->isSized()) {
const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
if (!ElSize->isZero()) {
SmallVector<const SCEV *, 8> NewOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
const SCEV *Remainder = SE.getConstant(Ty, 0);
if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
if (!Remainder->isZero())
NewOps.push_back(Remainder);
AnyNonZeroIndices = true;
} else {
// The operand was not divisible, so add it to the list of operands
// we'll scan next iteration.
NewOps.push_back(Ops[i]);
}
}
// If we made any changes, update Ops.
if (!ScaledOps.empty()) {
Ops = NewOps;
SimplifyAddOperands(Ops, Ty, SE);
}
}
}
// Record the scaled array index for this level of the type. If
// we didn't find any operands that could be factored, tentatively
// assume that element zero was selected (since the zero offset
// would obviously be folded away).
Value *Scaled = ScaledOps.empty() ?
Constant::getNullValue(Ty) :
expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
GepIndices.push_back(Scaled);
// Collect struct field index operands.
while (StructType *STy = dyn_cast<StructType>(ElTy)) {
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
// Field offsets are known. See if a constant offset falls within any of
// the struct fields.
if (Ops.empty())
break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
const StructLayout &SL = *DL.getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
GepIndices.push_back(
ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
ElTy = STy->getTypeAtIndex(ElIdx);
Ops[0] =
SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
AnyNonZeroIndices = true;
FoundFieldNo = true;
}
}
// If no struct field offsets were found, tentatively assume that
// field zero was selected (since the zero offset would obviously
// be folded away).
if (!FoundFieldNo) {
ElTy = STy->getTypeAtIndex(0u);
GepIndices.push_back(
Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
}
}
if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
ElTy = ATy->getElementType();
else
break;
}
// If none of the operands were convertible to proper GEP indices, cast
// the base to i8* and do an ugly getelementptr with that. It's still
// better than ptrtoint+arithmetic+inttoptr at least.
if (!AnyNonZeroIndices) {
// Cast the base to i8*.
V = InsertNoopCastOfTo(V,
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
assert(!isa<Instruction>(V) ||
SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
// Fold a GEP with constant operands.
if (Constant *CLHS = dyn_cast<Constant>(V))
if (Constant *CRHS = dyn_cast<Constant>(Idx))
return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
CLHS, CRHS);
// Do a quick scan to see if we have this GEP nearby. If so, reuse it.
unsigned ScanLimit = 6;
BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
// Scanning starts from the last instruction before the insertion point.
BasicBlock::iterator IP = Builder.GetInsertPoint();
if (IP != BlockBegin) {
--IP;
for (; ScanLimit; --IP, --ScanLimit) {
// Don't count dbg.value against the ScanLimit, to avoid perturbing the
// generated code.
if (isa<DbgInfoIntrinsic>(IP))
ScanLimit++;
if (IP->getOpcode() == Instruction::GetElementPtr &&
IP->getOperand(0) == V && IP->getOperand(1) == Idx)
return IP;
if (IP == BlockBegin) break;
}
}
// Save the original insertion point so we can restore it when we're done.
BuilderType::InsertPointGuard Guard(Builder);
// Move the insertion point out of as many loops as we can.
while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
// Ok, move up a level.
Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
}
// Emit a GEP.
Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
rememberInstruction(GEP);
return GEP;
}
// Save the original insertion point so we can restore it when we're done.
BuilderType::InsertPoint SaveInsertPt = Builder.saveIP();
// Move the insertion point out of as many loops as we can.
while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
if (!L->isLoopInvariant(V)) break;
bool AnyIndexNotLoopInvariant = false;
for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
E = GepIndices.end(); I != E; ++I)
if (!L->isLoopInvariant(*I)) {
AnyIndexNotLoopInvariant = true;
break;
}
if (AnyIndexNotLoopInvariant)
break;
BasicBlock *Preheader = L->getLoopPreheader();
if (!Preheader) break;
// Ok, move up a level.
Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
}
// Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
// because ScalarEvolution may have changed the address arithmetic to
// compute a value which is beyond the end of the allocated object.
Value *Casted = V;
if (V->getType() != PTy)
Casted = InsertNoopCastOfTo(Casted, PTy);
Value *GEP = Builder.CreateGEP(OriginalElTy, Casted,
GepIndices,
"scevgep");
Ops.push_back(SE.getUnknown(GEP));
rememberInstruction(GEP);
// Restore the original insert point.
Builder.restoreIP(SaveInsertPt);
return expand(SE.getAddExpr(Ops));
}
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later one.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
DominatorTree &DT) {
if (!A) return B;
if (!B) return A;
if (A->contains(B)) return B;
if (B->contains(A)) return A;
if (DT.dominates(A->getHeader(), B->getHeader())) return B;
if (DT.dominates(B->getHeader(), A->getHeader())) return A;
return A; // Arbitrarily break the tie.
}
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
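/// For instance, an add of a value computed in an inner loop and a
/// loop-invariant value is associated with the inner loop; constants and
/// other non-instruction values contribute no loop at all.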
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
// Test whether we've already computed the most relevant loop for this SCEV.
std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
RelevantLoops.insert(std::make_pair(S, nullptr));
if (!Pair.second)
return Pair.first->second;
if (isa<SCEVConstant>(S))
// A constant has no relevant loops.
return nullptr;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
return Pair.first->second = SE.LI->getLoopFor(I->getParent());
// A non-instruction has no relevant loops.
return nullptr;
}
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
const Loop *L = nullptr;
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
I != E; ++I)
L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
return RelevantLoops[N] = L;
}
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
const Loop *Result = getRelevantLoop(C->getOperand());
return RelevantLoops[C] = Result;
}
if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
const Loop *Result =
PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
getRelevantLoop(D->getRHS()),
*SE.DT);
return RelevantLoops[D] = Result;
}
llvm_unreachable("Unexpected SCEV type!");
}
namespace {
/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
DominatorTree &DT;
public:
explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
bool operator()(std::pair<const Loop *, const SCEV *> LHS,
std::pair<const Loop *, const SCEV *> RHS) const {
// Keep pointer operands sorted at the end.
if (LHS.second->getType()->isPointerTy() !=
RHS.second->getType()->isPointerTy())
return LHS.second->getType()->isPointerTy();
// Compare loops with PickMostRelevantLoop.
if (LHS.first != RHS.first)
return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
// If one operand is a non-constant negative and the other is not,
// put the non-constant negative on the right so that a sub can
// be used instead of a negate and add.
if (LHS.second->isNonConstantNegative()) {
if (!RHS.second->isNonConstantNegative())
return false;
} else if (RHS.second->isNonConstantNegative())
return true;
// Otherwise they are equivalent according to this comparison.
return false;
}
};
}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the add operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal, and
// so that pointer operands are inserted first, which the code below relies on
// to form more involved GEPs.
SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
E(S->op_begin()); I != E; ++I)
OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants and
// pointer operands precede non-pointer operands.
std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
// Emit instructions to add all the operands. Hoist as much as possible
// out of loops, and form meaningful getelementptrs where possible.
Value *Sum = nullptr;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const Loop *CurLoop = I->first;
const SCEV *Op = I->second;
if (!Sum) {
// This is the first operand. Just expand it.
Sum = expand(Op);
++I;
} else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
// The running sum expression is a pointer. Try to form a getelementptr
// at this level with that as the base.
SmallVector<const SCEV *, 4> NewOps;
for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek through
// it, to enable more of it to be folded into the GEP.
const SCEV *X = I->second;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
if (!isa<Instruction>(U->getValue()))
X = SE.getSCEV(U->getValue());
NewOps.push_back(X);
}
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
} else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
// The running sum is an integer, and there's a pointer at this level.
// Try to form a getelementptr. If the running sum is instructions,
// use a SCEVUnknown to avoid re-analyzing them.
SmallVector<const SCEV *, 4> NewOps;
NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
SE.getSCEV(Sum));
for (++I; I != E && I->first == CurLoop; ++I)
NewOps.push_back(I->second);
Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
} else if (Op->isNonConstantNegative()) {
// Instead of doing a negate and add, just do a subtract.
Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
Sum = InsertNoopCastOfTo(Sum, Ty);
Sum = InsertBinop(Instruction::Sub, Sum, W);
++I;
} else {
// A simple add.
Value *W = expandCodeFor(Op, Ty);
Sum = InsertNoopCastOfTo(Sum, Ty);
// Canonicalize a constant to the RHS.
if (isa<Constant>(Sum)) std::swap(Sum, W);
Sum = InsertBinop(Instruction::Add, Sum, W);
++I;
}
}
return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
// Collect all the mul operands in a loop, along with their associated loops.
// Iterate in reverse so that constants are emitted last, all else equal.
SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
E(S->op_begin()); I != E; ++I)
OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));
// Sort by loop. Use a stable sort so that constants follow non-constants.
std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));
// Emit instructions to mul all the operands. Hoist as much as possible
// out of loops.
Value *Prod = nullptr;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ++I) {
const SCEV *Op = I->second;
if (!Prod) {
// This is the first operand. Just expand it.
Prod = expand(Op);
} else if (Op->isAllOnesValue()) {
// Instead of doing a multiply by negative one, just do a negate.
Prod = InsertNoopCastOfTo(Prod, Ty);
Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
} else {
// A simple mul.
Value *W = expandCodeFor(Op, Ty);
Prod = InsertNoopCastOfTo(Prod, Ty);
// Canonicalize a constant to the RHS.
if (isa<Constant>(Prod)) std::swap(Prod, W);
const APInt *RHS;
if (match(W, m_Power2(RHS))) {
// Canonicalize Prod*(1<<C) to Prod<<C.
assert(!Ty->isVectorTy() && "vector types are not SCEVable");
Prod = InsertBinop(Instruction::Shl, Prod,
ConstantInt::get(Ty, RHS->logBase2()));
} else {
Prod = InsertBinop(Instruction::Mul, Prod, W);
}
}
}
return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *LHS = expandCodeFor(S->getLHS(), Ty);
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
const APInt &RHS = SC->getValue()->getValue();
if (RHS.isPowerOf2())
return InsertBinop(Instruction::LShr, LHS,
ConstantInt::get(Ty, RHS.logBase2()));
}
Value *RHS = expandCodeFor(S->getRHS(), Ty);
return InsertBinop(Instruction::UDiv, LHS, RHS);
}
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
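/// Informally: Base = {p + 4,+,s}<L> first peels to Base = p + 4 with
/// Rest += {0,+,s}<L>, and the remaining add then peels to Base = p with
/// Rest += 4 (the code relies on SCEV's operand ordering placing the
/// pointer-typed operand last in the add).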
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
ScalarEvolution &SE) {
while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
Base = A->getStart();
Rest = SE.getAddExpr(Rest,
SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
A->getStepRecurrence(SE),
A->getLoop(),
A->getNoWrapFlags(SCEV::FlagNW)));
}
if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
Base = A->getOperand(A->getNumOperands()-1);
SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
NewAddOps.back() = Rest;
Rest = SE.getAddExpr(NewAddOps);
ExposePointerBase(Base, Rest, SE);
}
}
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
const Loop *L) {
if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
(isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
return false;
// If any of the operands don't dominate the insert position, bail.
// Addrec operands are always loop-invariant, so this can only happen
// if there are instructions which haven't been hoisted.
if (L == IVIncInsertLoop) {
for (User::op_iterator OI = IncV->op_begin()+1,
OE = IncV->op_end(); OI != OE; ++OI)
if (Instruction *OInst = dyn_cast<Instruction>(OI))
if (!SE.DT->dominates(OInst, IVIncInsertPos))
return false;
}
// Advance to the next instruction.
IncV = dyn_cast<Instruction>(IncV->getOperand(0));
if (!IncV)
return false;
if (IncV->mayHaveSideEffects())
return false;
  if (IncV == PN)
    return true;
return isNormalAddRecExprPHI(PN, IncV, L);
}
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
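/// The simple patterns accepted here look like, e.g.:
///   %iv.next = add i64 %iv, %step                    ; add/sub of the IV
///   %iv.next = getelementptr i8, i8* %iv, i64 %step  ; address-size "ugly" GEP
/// where %step dominates InsertPos.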
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
Instruction *InsertPos,
bool allowScale) {
if (IncV == InsertPos)
return nullptr;
switch (IncV->getOpcode()) {
default:
return nullptr;
// Check for a simple Add/Sub or GEP of a loop invariant step.
case Instruction::Add:
case Instruction::Sub: {
Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
if (!OInst || SE.DT->dominates(OInst, InsertPos))
return dyn_cast<Instruction>(IncV->getOperand(0));
return nullptr;
}
case Instruction::BitCast:
return dyn_cast<Instruction>(IncV->getOperand(0));
case Instruction::GetElementPtr:
for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
I != E; ++I) {
if (isa<Constant>(*I))
continue;
if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
if (!SE.DT->dominates(OInst, InsertPos))
return nullptr;
}
if (allowScale) {
// allow any kind of GEP as long as it can be hoisted.
continue;
}
// This must be a pointer addition of constants (pretty), which is already
// handled, or some number of address-size elements (ugly). Ugly geps
// have 2 operands. i1* is used by the expander to represent an
// address-size element.
if (IncV->getNumOperands() != 2)
return nullptr;
unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
&& IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
return nullptr;
break;
}
return dyn_cast<Instruction>(IncV->getOperand(0));
}
}
/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
if (SE.DT->dominates(IncV, InsertPos))
return true;
// InsertPos must itself dominate IncV so that IncV's new position satisfies
// its existing users.
if (isa<PHINode>(InsertPos)
|| !SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
return false;
// Check that the chain of IV operands leading back to Phi can be hoisted.
SmallVector<Instruction*, 4> IVIncs;
for(;;) {
Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
if (!Oper)
return false;
// IncV is safe to hoist.
IVIncs.push_back(IncV);
IncV = Oper;
if (SE.DT->dominates(IncV, InsertPos))
break;
}
for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
E = IVIncs.rend(); I != E; ++I) {
(*I)->moveBefore(InsertPos);
}
return true;
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
const Loop *L) {
for(Instruction *IVOper = IncV;
(IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
/*allowScale=*/false));) {
if (IVOper == PN)
return true;
}
return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
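/// For an integer IV this emits an add (or sub) named "<IVName>.iv.next"; for
/// a pointer IV it emits a GEP, falling back to an i1* (address-unit) GEP
/// when the step is not constant, since an implicitly scaled GEP would
/// require a multiply inside the loop.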
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
Type *ExpandTy, Type *IntTy,
bool useSubtract) {
Value *IncV;
// If the PHI is a pointer, use a GEP, otherwise use an add or sub.
if (ExpandTy->isPointerTy()) {
PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
// If the step isn't constant, don't use an implicitly scaled GEP, because
// that would require a multiply inside the loop.
if (!isa<ConstantInt>(StepV))
GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
GEPPtrTy->getAddressSpace());
const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
if (IncV->getType() != PN->getType()) {
IncV = Builder.CreateBitCast(IncV, PN->getType());
rememberInstruction(IncV);
}
} else {
IncV = useSubtract ?
Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
rememberInstruction(IncV);
}
return IncV;
}
/// \brief Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
static void hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
Instruction *Pos, PHINode *LoopPhi) {
do {
if (DT->dominates(InstToHoist, Pos))
break;
// Make sure the increment is where we want it. But don't move it
// down past a potential existing post-inc user.
InstToHoist->moveBefore(Pos);
Pos = InstToHoist;
InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
} while (InstToHoist != LoopPhi);
}
/// \brief Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
const SCEVAddRecExpr *Phi,
const SCEVAddRecExpr *Requested,
bool &InvertStep) {
Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());
if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
return false;
// Try truncate it if necessary.
Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
if (!Phi)
return false;
// Check whether truncation will help.
if (Phi == Requested) {
InvertStep = false;
return true;
}
// Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
if (SE.getAddExpr(Requested->getStart(),
SE.getNegativeSCEV(Requested)) == Phi) {
InvertStep = true;
return true;
}
return false;
}
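/// IsIncrementNSW - Conservatively prove that the increment AR + Step cannot
/// sign-wrap: sign-extend the addrec and its step into a type of twice the
/// bit width and check whether sext(AR + Step) folds to the same SCEV as
/// sext(AR) + sext(Step). (IsIncrementNUW below is the analogous
/// zero-extension check.)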
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
if (!isa<IntegerType>(AR->getType()))
return false;
unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
const SCEV *Step = AR->getStepRecurrence(SE);
const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
SE.getSignExtendExpr(AR, WideTy));
const SCEV *ExtendAfterOp =
SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
return ExtendAfterOp == OpAfterExtend;
}
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
if (!isa<IntegerType>(AR->getType()))
return false;
unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
const SCEV *Step = AR->getStepRecurrence(SE);
const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
SE.getZeroExtendExpr(AR, WideTy));
const SCEV *ExtendAfterOp =
SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
return ExtendAfterOp == OpAfterExtend;
}
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
const Loop *L,
Type *ExpandTy,
Type *IntTy,
Type *&TruncTy,
bool &InvertStep) {
assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");
// Reuse a previously-inserted PHI, if present.
BasicBlock *LatchBlock = L->getLoopLatch();
if (LatchBlock) {
PHINode *AddRecPhiMatch = nullptr;
Instruction *IncV = nullptr;
TruncTy = nullptr;
InvertStep = false;
// Only try partially matching scevs that need truncation and/or
// step-inversion if we know this loop is outside the current loop.
bool TryNonMatchingSCEV = IVIncInsertLoop &&
SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *PN = dyn_cast<PHINode>(I); ++I) {
if (!SE.isSCEVable(PN->getType()))
continue;
const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(PN));
if (!PhiSCEV)
continue;
bool IsMatchingSCEV = PhiSCEV == Normalized;
// We only handle truncation and inversion of phi recurrences for the
// expanded expression if the expanded expression's loop dominates the
// loop we insert to. Check now, so we can bail out early.
if (!IsMatchingSCEV && !TryNonMatchingSCEV)
continue;
Instruction *TempIncV =
cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));
// Check whether we can reuse this PHI node.
if (LSRMode) {
if (!isExpandedAddRecExprPHI(PN, TempIncV, L))
continue;
if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
continue;
} else {
if (!isNormalAddRecExprPHI(PN, TempIncV, L))
continue;
}
// Stop if we have found an exact match SCEV.
if (IsMatchingSCEV) {
IncV = TempIncV;
TruncTy = nullptr;
InvertStep = false;
AddRecPhiMatch = PN;
break;
}
// Try whether the phi can be translated into the requested form
// (truncated and/or offset by a constant).
if ((!TruncTy || InvertStep) &&
canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node, but don't stop; we might find an exact match
// later.
AddRecPhiMatch = PN;
IncV = TempIncV;
TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
}
}
if (AddRecPhiMatch) {
// Potentially, move the increment. We have made sure in
// isExpandedAddRecExprPHI or hoistIVInc that this is possible.
if (L == IVIncInsertLoop)
hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);
// Ok, the add recurrence looks usable.
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(AddRecPhiMatch);
// Remember the increment.
rememberInstruction(IncV);
return AddRecPhiMatch;
}
}
// Save the original insertion point so we can restore it when we're done.
BuilderType::InsertPointGuard Guard(Builder);
// Another AddRec may need to be recursively expanded below. For example, if
// this AddRec is quadratic, the StepV may itself be an AddRec in this
// loop. Remove this loop from the PostIncLoops set before expanding such
// AddRecs. Otherwise, we cannot find a valid position for the step
// (i.e. StepV can never dominate its loop header). Ideally, we could do
// SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
// so it's not worth implementing SmallPtrSet::swap.
PostIncLoopSet SavedPostIncLoops = PostIncLoops;
PostIncLoops.clear();
// Expand code for the start value.
Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
L->getHeader()->begin());
// StartV must be hoisted into L's preheader to dominate the new phi.
assert(!isa<Instruction>(StartV) ||
SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
L->getHeader()));
// Expand code for the step value. Do this before creating the PHI so that PHI
// reuse code doesn't see an incomplete PHI.
const SCEV *Step = Normalized->getStepRecurrence(SE);
// If the stride is negative, insert a sub instead of an add for the increment
// (unless it's a constant, because subtracts of constants are canonicalized
// to adds).
bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
if (useSubtract)
Step = SE.getNegativeSCEV(Step);
// Expand the step somewhere that dominates the loop header.
Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
// The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
// we actually do emit an addition. It does not apply if we emit a
// subtraction.
bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
// Create the PHI.
BasicBlock *Header = L->getHeader();
Builder.SetInsertPoint(Header, Header->begin());
pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
Twine(IVName) + ".iv");
rememberInstruction(PN);
// Create the step instructions and populate the PHI.
for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
BasicBlock *Pred = *HPI;
// Add a start value.
if (!L->contains(Pred)) {
PN->addIncoming(StartV, Pred);
continue;
}
// Create a step value and add it to the PHI.
// If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
// instructions at IVIncInsertPos.
Instruction *InsertPos = L == IVIncInsertLoop ?
IVIncInsertPos : Pred->getTerminator();
Builder.SetInsertPoint(InsertPos);
Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
if (isa<OverflowingBinaryOperator>(IncV)) {
if (IncrementIsNUW)
cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
if (IncrementIsNSW)
cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
}
PN->addIncoming(IncV, Pred);
}
// After expanding subexpressions, restore the PostIncLoops set so the caller
// can ensure that IVIncrement dominates the current uses.
PostIncLoops = SavedPostIncLoops;
// Remember this PHI, even in post-inc mode.
InsertedValues.insert(PN);
return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
Type *STy = S->getType();
Type *IntTy = SE.getEffectiveSCEVType(STy);
const Loop *L = S->getLoop();
// Determine a normalized form of this expression, which is the expression
// before any post-inc adjustment is made.
const SCEVAddRecExpr *Normalized = S;
if (PostIncLoops.count(L)) {
PostIncLoopSet Loops;
Loops.insert(L);
Normalized =
cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
nullptr, Loops, SE, *SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
const SCEV *Start = Normalized->getStart();
const SCEV *PostLoopOffset = nullptr;
if (!SE.properlyDominates(Start, L->getHeader())) {
PostLoopOffset = Start;
Start = SE.getConstant(Normalized->getType(), 0);
Normalized = cast<SCEVAddRecExpr>(
SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
Normalized->getLoop(),
Normalized->getNoWrapFlags(SCEV::FlagNW)));
}
// Strip off any non-loop-dominating component from the addrec step.
const SCEV *Step = Normalized->getStepRecurrence(SE);
const SCEV *PostLoopScale = nullptr;
if (!SE.dominates(Step, L->getHeader())) {
PostLoopScale = Step;
Step = SE.getConstant(Normalized->getType(), 1);
Normalized =
cast<SCEVAddRecExpr>(SE.getAddRecExpr(
Start, Step, Normalized->getLoop(),
Normalized->getNoWrapFlags(SCEV::FlagNW)));
}
// Expand the core addrec. If we need post-loop scaling, force it to
// expand to an integer type to avoid the need for additional casting.
Type *ExpandTy = PostLoopScale ? IntTy : STy;
// In some cases, we decide to reuse an existing phi node but need to truncate
// it and/or invert the step.
Type *TruncTy = nullptr;
bool InvertStep = false;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
TruncTy, InvertStep);
// Accommodate post-inc mode, if necessary.
Value *Result;
if (!PostIncLoops.count(L))
Result = PN;
else {
// In PostInc mode, use the post-incremented value.
BasicBlock *LatchBlock = L->getLoopLatch();
assert(LatchBlock && "PostInc mode requires a unique loop latch!");
Result = PN->getIncomingValueForBlock(LatchBlock);
// For an expansion to use the postinc form, the client must call
// expandCodeFor with an InsertPoint that is either outside the PostIncLoop
// or dominated by IVIncInsertPos.
if (isa<Instruction>(Result)
&& !SE.DT->dominates(cast<Instruction>(Result),
Builder.GetInsertPoint())) {
// The induction variable's postinc expansion does not dominate this use.
// IVUsers tries to prevent this case, so it is rare. However, it can
// happen when an IVUser outside the loop is not dominated by the latch
// block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced during
// expansion with the value of the postinc user. Without fundamentally
// changing the way postinc users are tracked, the only remedy is
// inserting an extra IV increment. StepV might fold into PostLoopOffset,
// but hopefully expandCodeFor handles that.
bool useSubtract =
!ExpandTy->isPointerTy() && Step->isNonConstantNegative();
if (useSubtract)
Step = SE.getNegativeSCEV(Step);
Value *StepV;
{
// Expand the step somewhere that dominates the loop header.
BuilderType::InsertPointGuard Guard(Builder);
StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
}
Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
}
}
// We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
if (TruncTy) {
Type *ResTy = Result->getType();
// Normalize the result type.
if (ResTy != SE.getEffectiveSCEVType(ResTy))
Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
// Truncate the result.
if (TruncTy != Result->getType()) {
Result = Builder.CreateTrunc(Result, TruncTy);
rememberInstruction(Result);
}
// Invert the result.
if (InvertStep) {
Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
Result);
rememberInstruction(Result);
}
}
// Re-apply any non-loop-dominating scale.
if (PostLoopScale) {
assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateMul(Result,
expandCodeFor(PostLoopScale, IntTy));
rememberInstruction(Result);
}
// Re-apply any non-loop-dominating offset.
if (PostLoopOffset) {
if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
const SCEV *const OffsetArray[1] = { PostLoopOffset };
Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
} else {
Result = InsertNoopCastOfTo(Result, IntTy);
Result = Builder.CreateAdd(Result,
expandCodeFor(PostLoopOffset, IntTy));
rememberInstruction(Result);
}
}
return Result;
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
if (!CanonicalMode) return expandAddRecExprLiterally(S);
Type *Ty = SE.getEffectiveSCEVType(S->getType());
const Loop *L = S->getLoop();
// First check for an existing canonical IV in a suitable type.
PHINode *CanonicalIV = nullptr;
if (PHINode *PN = L->getCanonicalInductionVariable())
if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
// Rewrite an AddRec in terms of the canonical induction variable, if
// its type is more narrow.
if (CanonicalIV &&
SE.getTypeSizeInBits(CanonicalIV->getType()) >
SE.getTypeSizeInBits(Ty)) {
SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
S->getNoWrapFlags(SCEV::FlagNW)));
BasicBlock::iterator NewInsertPt =
std::next(BasicBlock::iterator(cast<Instruction>(V)));
BuilderType::InsertPointGuard Guard(Builder);
while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
isa<LandingPadInst>(NewInsertPt))
++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
NewInsertPt);
return V;
}
// {X,+,F} --> X + {0,+,F}
if (!S->getStart()->isZero()) {
SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
NewOps[0] = SE.getConstant(Ty, 0);
const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
S->getNoWrapFlags(SCEV::FlagNW));
// Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
// comments on expandAddToGEP for details.
const SCEV *Base = S->getStart();
const SCEV *RestArray[1] = { Rest };
// Dig into the expression to find the pointer base for a GEP.
ExposePointerBase(Base, RestArray[0], SE);
// If we found a pointer, expand the AddRec with a GEP.
if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
// Make sure the Base isn't something exotic, such as a multiplied
// or divided pointer value. In those cases, the result type isn't
// actually a pointer type.
if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
Value *StartV = expand(Base);
assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
}
}
// Just do a normal add. Pre-expand the operands to suppress folding.
return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
SE.getUnknown(expand(Rest))));
}
// If we don't yet have a canonical IV, create one.
if (!CanonicalIV) {
// Create and insert the PHI node for the induction variable in the
// specified loop.
BasicBlock *Header = L->getHeader();
pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
Header->begin());
rememberInstruction(CanonicalIV);
SmallSet<BasicBlock *, 4> PredSeen;
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
BasicBlock *HP = *HPI;
if (!PredSeen.insert(HP).second) {
// There must be an incoming value for each predecessor, even the
// duplicates!
CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
continue;
}
if (L->contains(HP)) {
// Insert a unit add instruction right before the terminator
// corresponding to the back-edge.
Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
"indvar.next",
HP->getTerminator());
Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
rememberInstruction(Add);
CanonicalIV->addIncoming(Add, HP);
} else {
CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
}
}
}
// {0,+,1} --> Insert a canonical induction variable into the loop!
if (S->isAffine() && S->getOperand(1)->isOne()) {
assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
"IVs with types different from the canonical IV should "
"already have been handled!");
return CanonicalIV;
}
// {0,+,F} --> {0,+,1} * F
// If this is a simple linear addrec, emit it now as a special case.
if (S->isAffine()) // {0,+,F} --> i*F
return
expand(SE.getTruncateOrNoop(
SE.getMulExpr(SE.getUnknown(CanonicalIV),
SE.getNoopOrAnyExtend(S->getOperand(1),
CanonicalIV->getType())),
Ty));
// If this is a chain of recurrences, turn it into a closed form, using the
// folders, then expandCodeFor the closed form. This allows the folders to
// simplify the expression without having to build a bunch of special code
// into this folder.
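  // E.g. a quadratic {0,+,1,+,2}<L> evaluated at the canonical IV i folds to
  // i + 2*(i*(i-1)/2) = i*i, which is then expanded directly.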
const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
// Promote S up to the canonical IV type, if the cast is foldable.
const SCEV *NewS = S;
const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
if (isa<SCEVAddRecExpr>(Ext))
NewS = Ext;
const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
//cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
// Truncate the result down to the original type, if needed.
const SCEV *T = SE.getTruncateOrNoop(V, Ty);
return expand(T);
}
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateTrunc(V, Ty);
rememberInstruction(I);
return I;
}
Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateZExt(V, Ty);
rememberInstruction(I);
return I;
}
Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
Type *Ty = SE.getEffectiveSCEVType(S->getType());
Value *V = expandCodeFor(S->getOperand(),
SE.getEffectiveSCEVType(S->getOperand()->getType()));
Value *I = Builder.CreateSExt(V, Ty);
rememberInstruction(I);
return I;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
if (S->getOperand(i)->getType() != Ty) {
Ty = SE.getEffectiveSCEVType(Ty);
LHS = InsertNoopCastOfTo(LHS, Ty);
}
Value *RHS = expandCodeFor(S->getOperand(i), Ty);
Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
rememberInstruction(ICmp);
Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
rememberInstruction(Sel);
LHS = Sel;
}
// In the case of mixed integer and pointer types, cast the
// final result back to the pointer type.
if (LHS->getType() != S->getType())
LHS = InsertNoopCastOfTo(LHS, S->getType());
return LHS;
}
Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
Type *Ty = LHS->getType();
for (int i = S->getNumOperands()-2; i >= 0; --i) {
// In the case of mixed integer and pointer types, do the
// rest of the comparisons as integer.
if (S->getOperand(i)->getType() != Ty) {
Ty = SE.getEffectiveSCEVType(Ty);
LHS = InsertNoopCastOfTo(LHS, Ty);
}
Value *RHS = expandCodeFor(S->getOperand(i), Ty);
Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
rememberInstruction(ICmp);
Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
rememberInstruction(Sel);
LHS = Sel;
}
// In the case of mixed integer and pointer types, cast the
// final result back to the pointer type.
if (LHS->getType() != S->getType())
LHS = InsertNoopCastOfTo(LHS, S->getType());
return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
Instruction *IP) {
Builder.SetInsertPoint(IP->getParent(), IP);
return expandCodeFor(SH, Ty);
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
// Expand the code for this SCEV.
Value *V = expand(SH);
if (Ty) {
assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
"non-trivial casts should be done with the SCEVs directly!");
V = InsertNoopCastOfTo(V, Ty);
}
return V;
}
Value *SCEVExpander::expand(const SCEV *S) {
// Compute an insertion point for this SCEV object. Hoist the instructions
// as far out in the loop nest as possible.
Instruction *InsertPt = Builder.GetInsertPoint();
for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
L = L->getParentLoop())
if (SE.isLoopInvariant(S, L)) {
if (!L) break;
if (BasicBlock *Preheader = L->getLoopPreheader())
InsertPt = Preheader->getTerminator();
else {
// LSR sets the insertion point for AddRec start/step values to the
// block start to simplify value reuse, even though it's an invalid
// position. SCEVExpander must correct for this in all cases.
InsertPt = L->getHeader()->getFirstInsertionPt();
}
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
InsertPt = L->getHeader()->getFirstInsertionPt();
while (InsertPt != Builder.GetInsertPoint()
&& (isInsertedInstruction(InsertPt)
|| isa<DbgInfoIntrinsic>(InsertPt))) {
InsertPt = std::next(BasicBlock::iterator(InsertPt));
}
break;
}
// Check to see if we already expanded this here.
std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >::iterator
I = InsertedExpressions.find(std::make_pair(S, InsertPt));
if (I != InsertedExpressions.end())
return I->second;
BuilderType::InsertPointGuard Guard(Builder);
Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
// Expand the expression into instructions.
Value *V = visit(S);
// Remember the expanded value for this SCEV at this location.
//
// This is independent of PostIncLoops. The mapped value simply materializes
// the expression at this insertion point. If the mapped value happened to be
// a postinc expansion, it could be reused by a non-postinc user, but only if
// its insertion point was already at the head of the loop.
InsertedExpressions[std::make_pair(S, InsertPt)] = V;
return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
if (!PostIncLoops.empty())
InsertedPostIncValues.insert(I);
else
InsertedValues.insert(I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
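/// The IR produced is roughly (with illustrative names and an i64 type):
///   %indvar = phi i64 [ 0, %preheader ], [ %indvar.next, %latch ]
///   %indvar.next = add i64 %indvar, 1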
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
Type *Ty) {
assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
// Build a SCEV for {0,+,1}<L>.
// Conservatively use FlagAnyWrap for now.
const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
// Emit code for it.
BuilderType::InsertPointGuard Guard(Builder);
PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
L->getHeader()->begin()));
return V;
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
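/// For example, two header phis with the same SCEV, say both {0,+,1}<L>, are
/// collapsed onto the more canonical (or wider) one, and a trunc or bitcast
/// is inserted for users of the eliminated phi when the types differ.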
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
SmallVectorImpl<WeakTrackingVH> &DeadInsts,
const TargetTransformInfo *TTI) {
// Find integer phis in order of increasing width.
SmallVector<PHINode*, 8> Phis;
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
Phis.push_back(Phi);
}
if (TTI)
std::sort(Phis.begin(), Phis.end(), [](Value *LHS, Value *RHS) {
// Put pointers at the back and make sure pointer < pointer = false.
if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
return RHS->getType()->getPrimitiveSizeInBits() <
LHS->getType()->getPrimitiveSizeInBits();
});
unsigned NumElim = 0;
DenseMap<const SCEV *, PHINode *> ExprToIVMap;
// Process phis from wide to narrow. Map wide phis to their truncation
// so narrow phis can reuse them.
for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
PEnd = Phis.end(); PIter != PEnd; ++PIter) {
PHINode *Phi = *PIter;
// Fold constant phis. They may be congruent to other constant phis and
// would confuse the logic below that expects proper IVs.
if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) {
Phi->replaceAllUsesWith(V);
DeadInsts.emplace_back(Phi);
++NumElim;
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated constant iv: " << *Phi << '\n');
continue;
}
if (!SE.isSCEVable(Phi->getType()))
continue;
PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
if (!OrigPhiRef) {
OrigPhiRef = Phi;
if (Phi->getType()->isIntegerTy() && TTI
&& TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
// This phi can be freely truncated to the narrowest phi type. Map the
// truncated expression to it so it will be reused for narrow types.
const SCEV *TruncExpr =
SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
ExprToIVMap[TruncExpr] = Phi;
}
continue;
}
// Replacing a pointer phi with an integer phi or vice-versa doesn't make
// sense.
if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
continue;
if (BasicBlock *LatchBlock = L->getLoopLatch()) {
Instruction *OrigInc =
cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
Instruction *IsomorphicInc =
cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
// If this phi has the same width but is more canonical, replace the
// original with it. As part of the "more canonical" determination,
// respect a prior decision to use an IV chain.
if (OrigPhiRef->getType() == Phi->getType()
&& !(ChainedPhis.count(Phi)
|| isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
&& (ChainedPhis.count(Phi)
|| isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
std::swap(OrigPhiRef, Phi);
std::swap(OrigInc, IsomorphicInc);
}
// Replacing the congruent phi is sufficient because acyclic redundancy
// elimination, CSE/GVN, should handle the rest. However, once SCEV proves
// that a phi is congruent, it's often the head of an IV user cycle that
// is isomorphic with the original phi. It's worth eagerly cleaning up the
// common case of a single IV increment so that DeleteDeadPHIs can remove
// cycles that had postinc uses.
const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
IsomorphicInc->getType());
if (OrigInc != IsomorphicInc
&& TruncExpr == SE.getSCEV(IsomorphicInc)
&& ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
|| hoistIVInc(OrigInc, IsomorphicInc))) {
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated congruent iv.inc: "
<< *IsomorphicInc << '\n');
Value *NewInc = OrigInc;
if (OrigInc->getType() != IsomorphicInc->getType()) {
Instruction *IP = nullptr;
if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
IP = PN->getParent()->getFirstInsertionPt();
else
IP = OrigInc->getNextNode();
IRBuilder<> Builder(IP);
Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.CreateTruncOrBitCast(OrigInc,
                                                IsomorphicInc->getType(), IVName);
}
IsomorphicInc->replaceAllUsesWith(NewInc);
DeadInsts.emplace_back(IsomorphicInc);
}
}
DEBUG_WITH_TYPE(DebugType, dbgs()
<< "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
++NumElim;
Value *NewIV = OrigPhiRef;
if (OrigPhiRef->getType() != Phi->getType()) {
IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
}
Phi->replaceAllUsesWith(NewIV);
DeadInsts.emplace_back(Phi);
}
return NumElim;
}
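// Illustrative sketch (not part of the source): two phis that compute the same
// recurrence {0,+,1} are congruent, so replaceCongruentIVs keeps one and
// rewrites uses of the other:
//
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]   ; {0,+,1}
//     %j = phi i64 [ 0, %entry ], [ %j.next, %loop ]   ; {0,+,1}, congruent
//     %i.next = add i64 %i, 1
//     %j.next = add i64 %j, 1
//
// Uses of %j are replaced with %i (via a trunc/bitcast if the widths differ),
// %j and %j.next are queued on DeadInsts, and NumElim is bumped once.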
bool SCEVExpander::isHighCostExpansionHelper(
const SCEV *S, Loop *L, SmallPtrSetImpl<const SCEV *> &Processed) {
// Zero/One operand expressions
switch (S->getSCEVType()) {
case scUnknown:
case scConstant:
return false;
case scTruncate:
return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(), L,
Processed);
case scZeroExtend:
return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
L, Processed);
case scSignExtend:
return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
L, Processed);
}
if (!Processed.insert(S).second)
return false;
if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
// If the divisor is a power of two and the SCEV type fits in a native
    // integer, consider the division cheap irrespective of whether it occurs in
// the user code since it can be lowered into a right shift.
if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
if (SC->getValue()->getValue().isPowerOf2()) {
const DataLayout &DL =
L->getHeader()->getParent()->getParent()->getDataLayout();
unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
return DL.isIllegalInteger(Width);
}
// UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
// HowManyLessThans produced to compute a precise expression, rather than a
// UDiv from the user's code. If we can't find a UDiv in the code with some
    // simple searching, assume the former and consider UDivExpr expensive to
    // compute.
BasicBlock *ExitingBB = L->getExitingBlock();
if (!ExitingBB)
return true;
BranchInst *ExitingBI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
if (!ExitingBI || !ExitingBI->isConditional())
return true;
ICmpInst *OrigCond = dyn_cast<ICmpInst>(ExitingBI->getCondition());
if (!OrigCond)
return true;
const SCEV *RHS = SE.getSCEV(OrigCond->getOperand(1));
RHS = SE.getMinusSCEV(RHS, SE.getConstant(RHS->getType(), 1));
if (RHS != S) {
const SCEV *LHS = SE.getSCEV(OrigCond->getOperand(0));
LHS = SE.getMinusSCEV(LHS, SE.getConstant(LHS->getType(), 1));
if (LHS != S)
return true;
}
}
// HowManyLessThans uses a Max expression whenever the loop is not guarded by
// the exit condition.
if (isa<SCEVSMaxExpr>(S) || isa<SCEVUMaxExpr>(S))
return true;
// Recurse past nary expressions, which commonly occur in the
// BackedgeTakenCount. They may already exist in program code, and if not,
  // they are not too expensive to rematerialize.
if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
I != E; ++I) {
if (isHighCostExpansionHelper(*I, L, Processed))
return true;
}
}
// If we haven't recognized an expensive SCEV pattern, assume it's an
// expression produced by program code.
return false;
}
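// Illustrative sketch of the cost decision above: expanding (%n /u 8) on a
// target-legal integer type is treated as cheap because it lowers to a
// logical shift right, while (%n /u 7) is treated as high cost unless a
// matching udiv already feeds the loop's exit comparison.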
namespace {
// Search for a SCEV subexpression that is not safe to expand. Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a nonzero constant here. If this is
// inadequate, we could easily allow division by SCEVUnknown by using
// ValueTracking to check isKnownNonZero().
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {
ScalarEvolution &SE;
bool IsUnsafe;
SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
bool follow(const SCEV *S) {
if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
if (!SC || SC->getValue()->isZero()) {
IsUnsafe = true;
return false;
}
}
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = AR->getStepRecurrence(SE);
if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
IsUnsafe = true;
return false;
}
}
return true;
}
bool isDone() const { return IsUnsafe; }
};
}
namespace llvm {
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
SCEVFindUnsafe Search(SE);
visitAll(S, Search);
return !Search.IsUnsafe;
}
}
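// Usage sketch (hypothetical caller): gate expansion on the safety check so a
// speculated udiv or non-affine recurrence is never materialized.
//
//   if (isSafeToExpand(S, SE)) {
//     SCEVExpander Expander(SE, DL, "scev");
//     Value *V = Expander.expandCodeFor(S, S->getType(), InsertPt);
//     (void)V;
//   }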
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/ScalarEvolutionNormalization.cpp | //===- ScalarEvolutionNormalization.cpp - See below -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for working with "normalized" expressions.
// See the comments at the top of ScalarEvolutionNormalization.h for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
using namespace llvm;
/// IVUseShouldUsePostIncValue - We have discovered a "User" of an IV expression
/// and now we need to decide whether the user should use the preinc or post-inc
/// value. If this user should use the post-inc version of the IV, return true.
///
/// Choosing wrong here can break dominance properties (if we choose to use the
/// post-inc value when we cannot) or it can end up adding extra live-ranges to
/// the loop, resulting in reg-reg copies (if we use the pre-inc value when we
/// should use the post-inc value).
static bool IVUseShouldUsePostIncValue(Instruction *User, Value *Operand,
const Loop *L, DominatorTree *DT) {
// If the user is in the loop, use the preinc value.
if (L->contains(User)) return false;
BasicBlock *LatchBlock = L->getLoopLatch();
if (!LatchBlock)
return false;
// Ok, the user is outside of the loop. If it is dominated by the latch
// block, use the post-inc value.
if (DT->dominates(LatchBlock, User->getParent()))
return true;
// There is one case we have to be careful of: PHI nodes. These little guys
// can live in blocks that are not dominated by the latch block, but (since
// their uses occur in the predecessor block, not the block the PHI lives in)
// should still use the post-inc value. Check for this case now.
PHINode *PN = dyn_cast<PHINode>(User);
if (!PN || !Operand) return false; // not a phi, not dominated by latch block.
// Look at all of the uses of Operand by the PHI node. If any use corresponds
// to a block that is not dominated by the latch block, give up and use the
// preincremented value.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == Operand &&
!DT->dominates(LatchBlock, PN->getIncomingBlock(i)))
return false;
// Okay, all uses of Operand by PN are in predecessor blocks that really are
// dominated by the latch block. Use the post-incremented value.
return true;
}
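// Illustrative IR (a sketch): here %use is outside the loop and dominated by
// the latch, so it should use the post-inc value %iv.next:
//
//   loop:
//     %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]   ; {0,+,1}
//     %iv.next = add i64 %iv, 1                          ; {1,+,1}
//     %cmp = icmp slt i64 %iv.next, %n
//     br i1 %cmp, label %loop, label %exit
//   exit:
//     %use = add i64 %iv.next, 42   ; post-inc user
//
// A use inside the loop body, by contrast, keeps the pre-inc value %iv.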
namespace {
/// Hold the state used during post-inc expression transformation, including a
/// map of transformed expressions.
class PostIncTransform {
TransformKind Kind;
PostIncLoopSet &Loops;
ScalarEvolution &SE;
DominatorTree &DT;
DenseMap<const SCEV*, const SCEV*> Transformed;
public:
PostIncTransform(TransformKind kind, PostIncLoopSet &loops,
ScalarEvolution &se, DominatorTree &dt):
Kind(kind), Loops(loops), SE(se), DT(dt) {}
const SCEV *TransformSubExpr(const SCEV *S, Instruction *User,
Value *OperandValToReplace);
protected:
const SCEV *TransformImpl(const SCEV *S, Instruction *User,
Value *OperandValToReplace);
};
} // namespace
/// Implement post-inc transformation for all valid expression types.
const SCEV *PostIncTransform::
TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
if (const SCEVCastExpr *X = dyn_cast<SCEVCastExpr>(S)) {
const SCEV *O = X->getOperand();
const SCEV *N = TransformSubExpr(O, User, OperandValToReplace);
if (O != N)
switch (S->getSCEVType()) {
case scZeroExtend: return SE.getZeroExtendExpr(N, S->getType());
case scSignExtend: return SE.getSignExtendExpr(N, S->getType());
case scTruncate: return SE.getTruncateExpr(N, S->getType());
default: llvm_unreachable("Unexpected SCEVCastExpr kind!");
}
return S;
}
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// An addrec. This is the interesting part.
SmallVector<const SCEV *, 8> Operands;
const Loop *L = AR->getLoop();
// The addrec conceptually uses its operands at loop entry.
Instruction *LUser = L->getHeader()->begin();
// Transform each operand.
for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
I != E; ++I) {
Operands.push_back(TransformSubExpr(*I, LUser, nullptr));
}
// Conservatively use AnyWrap until/unless we need FlagNW.
const SCEV *Result = SE.getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
switch (Kind) {
case NormalizeAutodetect:
// Normalize this SCEV by subtracting the expression for the final step.
// We only allow affine AddRecs to be normalized, otherwise we would not
// be able to correctly denormalize.
// e.g. {1,+,3,+,2} == {-2,+,1,+,2} + {3,+,2}
// Normalized form: {-2,+,1,+,2}
// Denormalized form: {1,+,3,+,2}
//
// However, denormalization would use a different step expression than
// normalization (see getPostIncExpr), generating the wrong final
// expression: {-2,+,1,+,2} + {1,+,2} => {-1,+,3,+,2}
if (AR->isAffine() &&
IVUseShouldUsePostIncValue(User, OperandValToReplace, L, &DT)) {
const SCEV *TransformedStep =
TransformSubExpr(AR->getStepRecurrence(SE),
User, OperandValToReplace);
Result = SE.getMinusSCEV(Result, TransformedStep);
Loops.insert(L);
}
#if 0
// This assert is conceptually correct, but ScalarEvolution currently
// sometimes fails to canonicalize two equal SCEVs to exactly the same
// form. It's possibly a pessimization when this happens, but it isn't a
// correctness problem, so disable this assert for now.
assert(S == TransformSubExpr(Result, User, OperandValToReplace) &&
"SCEV normalization is not invertible!");
#endif
break;
case Normalize:
      // We want to normalize the step expression, because otherwise we might
      // not be able to denormalize to the original expression.
//
      // Here is an example of what will happen if we don't normalize the step:
// ORIGINAL ISE:
// {(100 /u {1,+,1}<%bb16>),+,(100 /u {1,+,1}<%bb16>)}<%bb25>
// NORMALIZED ISE:
// {((-1 * (100 /u {1,+,1}<%bb16>)) + (100 /u {0,+,1}<%bb16>)),+,
// (100 /u {0,+,1}<%bb16>)}<%bb25>
// DENORMALIZED BACK ISE:
// {((2 * (100 /u {1,+,1}<%bb16>)) + (-1 * (100 /u {2,+,1}<%bb16>))),+,
// (100 /u {1,+,1}<%bb16>)}<%bb25>
// Note that the initial value changes after normalization +
// denormalization, which isn't correct.
if (Loops.count(L)) {
const SCEV *TransformedStep =
TransformSubExpr(AR->getStepRecurrence(SE),
User, OperandValToReplace);
Result = SE.getMinusSCEV(Result, TransformedStep);
}
#if 0
// See the comment on the assert above.
assert(S == TransformSubExpr(Result, User, OperandValToReplace) &&
"SCEV normalization is not invertible!");
#endif
break;
case Denormalize:
      // Here we want to normalize step expressions for the same reasons as
      // stated above.
if (Loops.count(L)) {
const SCEV *TransformedStep =
TransformSubExpr(AR->getStepRecurrence(SE),
User, OperandValToReplace);
Result = SE.getAddExpr(Result, TransformedStep);
}
break;
}
return Result;
}
if (const SCEVNAryExpr *X = dyn_cast<SCEVNAryExpr>(S)) {
SmallVector<const SCEV *, 8> Operands;
bool Changed = false;
// Transform each operand.
for (SCEVNAryExpr::op_iterator I = X->op_begin(), E = X->op_end();
I != E; ++I) {
const SCEV *O = *I;
const SCEV *N = TransformSubExpr(O, User, OperandValToReplace);
Changed |= N != O;
Operands.push_back(N);
}
// If any operand actually changed, return a transformed result.
if (Changed)
switch (S->getSCEVType()) {
case scAddExpr: return SE.getAddExpr(Operands);
case scMulExpr: return SE.getMulExpr(Operands);
case scSMaxExpr: return SE.getSMaxExpr(Operands);
case scUMaxExpr: return SE.getUMaxExpr(Operands);
default: llvm_unreachable("Unexpected SCEVNAryExpr kind!");
}
return S;
}
if (const SCEVUDivExpr *X = dyn_cast<SCEVUDivExpr>(S)) {
const SCEV *LO = X->getLHS();
const SCEV *RO = X->getRHS();
const SCEV *LN = TransformSubExpr(LO, User, OperandValToReplace);
const SCEV *RN = TransformSubExpr(RO, User, OperandValToReplace);
if (LO != LN || RO != RN)
return SE.getUDivExpr(LN, RN);
return S;
}
llvm_unreachable("Unexpected SCEV kind!");
}
/// Manage recursive transformation across an expression DAG. Revisiting
/// expressions would lead to exponential recursion.
const SCEV *PostIncTransform::
TransformSubExpr(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
if (isa<SCEVConstant>(S) || isa<SCEVUnknown>(S))
return S;
const SCEV *Result = Transformed.lookup(S);
if (Result)
return Result;
Result = TransformImpl(S, User, OperandValToReplace);
Transformed[S] = Result;
return Result;
}
/// Top level driver for transforming an expression DAG into its requested
/// post-inc form (either "Normalized" or "Denormalized").
const SCEV *llvm::TransformForPostIncUse(TransformKind Kind,
const SCEV *S,
Instruction *User,
Value *OperandValToReplace,
PostIncLoopSet &Loops,
ScalarEvolution &SE,
DominatorTree &DT) {
PostIncTransform Transform(Kind, Loops, SE, DT);
return Transform.TransformSubExpr(S, User, OperandValToReplace);
}
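// Usage sketch (hypothetical caller, in the spirit of IVUsers): normalize an
// expression for a post-inc use, record the affected loops, then denormalize
// with the same loop set to recover the original form.
//
//   PostIncLoopSet Loops;
//   const SCEV *Norm = TransformForPostIncUse(NormalizeAutodetect, S, User,
//                                             OperandValToReplace, Loops,
//                                             SE, DT);
//   const SCEV *Orig = TransformForPostIncUse(Denormalize, Norm, User,
//                                             OperandValToReplace, Loops,
//                                             SE, DT);
//   // Orig is expected to equal S, though ScalarEvolution occasionally
//   // canonicalizes equal SCEVs differently (see the disabled asserts above).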
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/MemoryLocation.cpp | //===- MemoryLocation.cpp - Memory location descriptions -------------------==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
using namespace llvm;
MemoryLocation MemoryLocation::get(const LoadInst *LI) {
AAMDNodes AATags;
LI->getAAMetadata(AATags);
const auto &DL = LI->getModule()->getDataLayout();
return MemoryLocation(LI->getPointerOperand(),
DL.getTypeStoreSize(LI->getType()), AATags);
}
MemoryLocation MemoryLocation::get(const StoreInst *SI) {
AAMDNodes AATags;
SI->getAAMetadata(AATags);
const auto &DL = SI->getModule()->getDataLayout();
return MemoryLocation(SI->getPointerOperand(),
DL.getTypeStoreSize(SI->getValueOperand()->getType()),
AATags);
}
MemoryLocation MemoryLocation::get(const VAArgInst *VI) {
AAMDNodes AATags;
VI->getAAMetadata(AATags);
return MemoryLocation(VI->getPointerOperand(), UnknownSize, AATags);
}
MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {
AAMDNodes AATags;
CXI->getAAMetadata(AATags);
const auto &DL = CXI->getModule()->getDataLayout();
return MemoryLocation(
CXI->getPointerOperand(),
DL.getTypeStoreSize(CXI->getCompareOperand()->getType()), AATags);
}
MemoryLocation MemoryLocation::get(const AtomicRMWInst *RMWI) {
AAMDNodes AATags;
RMWI->getAAMetadata(AATags);
const auto &DL = RMWI->getModule()->getDataLayout();
return MemoryLocation(RMWI->getPointerOperand(),
DL.getTypeStoreSize(RMWI->getValOperand()->getType()),
AATags);
}
MemoryLocation MemoryLocation::getForSource(const MemTransferInst *MTI) {
uint64_t Size = UnknownSize;
if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
Size = C->getValue().getZExtValue();
// memcpy/memmove can have AA tags. For memcpy, they apply
// to both the source and the destination.
AAMDNodes AATags;
MTI->getAAMetadata(AATags);
return MemoryLocation(MTI->getRawSource(), Size, AATags);
}
MemoryLocation MemoryLocation::getForDest(const MemIntrinsic *MTI) {
uint64_t Size = UnknownSize;
if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
Size = C->getValue().getZExtValue();
// memcpy/memmove can have AA tags. For memcpy, they apply
// to both the source and the destination.
AAMDNodes AATags;
MTI->getAAMetadata(AATags);
return MemoryLocation(MTI->getRawDest(), Size, AATags);
}
// FIXME: This code is duplicated with BasicAliasAnalysis and should be hoisted
// to some common utility location.
static bool isMemsetPattern16(const Function *MS,
const TargetLibraryInfo &TLI) {
if (TLI.has(LibFunc::memset_pattern16) &&
MS->getName() == "memset_pattern16") {
FunctionType *MemsetType = MS->getFunctionType();
if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
isa<PointerType>(MemsetType->getParamType(0)) &&
isa<PointerType>(MemsetType->getParamType(1)) &&
isa<IntegerType>(MemsetType->getParamType(2)))
return true;
}
return false;
}
MemoryLocation MemoryLocation::getForArgument(ImmutableCallSite CS,
unsigned ArgIdx,
const TargetLibraryInfo &TLI) {
AAMDNodes AATags;
CS->getAAMetadata(AATags);
const Value *Arg = CS.getArgument(ArgIdx);
// We may be able to produce an exact size for known intrinsics.
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
const DataLayout &DL = II->getModule()->getDataLayout();
(void)DL; // HLSL Change - unreferenced local variable
switch (II->getIntrinsicID()) {
default:
break;
case Intrinsic::memset:
case Intrinsic::memcpy:
case Intrinsic::memmove:
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memory intrinsic");
if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
break;
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
assert(ArgIdx == 1 && "Invalid argument index");
return MemoryLocation(
Arg, cast<ConstantInt>(II->getArgOperand(0))->getZExtValue(), AATags);
case Intrinsic::invariant_end:
assert(ArgIdx == 2 && "Invalid argument index");
return MemoryLocation(
Arg, cast<ConstantInt>(II->getArgOperand(1))->getZExtValue(), AATags);
#if 0 // HLSL Change - remove platform intrinsics
case Intrinsic::arm_neon_vld1:
assert(ArgIdx == 0 && "Invalid argument index");
// LLVM's vld1 and vst1 intrinsics currently only support a single
// vector register.
return MemoryLocation(Arg, DL.getTypeStoreSize(II->getType()), AATags);
case Intrinsic::arm_neon_vst1:
assert(ArgIdx == 0 && "Invalid argument index");
return MemoryLocation(
Arg, DL.getTypeStoreSize(II->getArgOperand(1)->getType()), AATags);
#endif // HLSL Change - remove platform intrinsics
}
}
// We can bound the aliasing properties of memset_pattern16 just as we can
// for memcpy/memset. This is particularly important because the
// LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
// whenever possible.
if (CS.getCalledFunction() &&
isMemsetPattern16(CS.getCalledFunction(), TLI)) {
assert((ArgIdx == 0 || ArgIdx == 1) &&
"Invalid argument index for memset_pattern16");
if (ArgIdx == 1)
return MemoryLocation(Arg, 16, AATags);
if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
return MemoryLocation(Arg, LenCI->getZExtValue(), AATags);
}
// FIXME: Handle memset_pattern4 and memset_pattern8 also.
return MemoryLocation(CS.getArgument(ArgIdx), UnknownSize, AATags);
}
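// Usage sketch (hypothetical caller): build precise locations for alias
// queries instead of passing bare pointers.
//
//   if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
//     MemoryLocation Loc = MemoryLocation::get(LI);
//     // Loc.Ptr is the pointer operand, Loc.Size the store size in bytes
//     // (or UnknownSize), and Loc.AATags the tbaa/scope/noalias metadata.
//   }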
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/ModuleDebugInfoPrinter.cpp | //===-- ModuleDebugInfoPrinter.cpp - Prints module debug info metadata ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass decodes the debug info metadata in a module and prints it in a
// (sufficiently prepared) human-readable form.
//
// For example, run this pass from opt along with the -analyze option, and
// it'll print to standard output.
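//
// A typical invocation and output shape (a sketch, with a hypothetical
// input file):
//
//   $ opt -analyze -module-debuginfo input.ll
//   Compile unit: DW_LANG_C_plus_plus from /tmp/input.cpp
//   Subprogram: main from /tmp/input.cpp:3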
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
namespace {
class ModuleDebugInfoPrinter : public ModulePass {
DebugInfoFinder Finder;
public:
static char ID; // Pass identification, replacement for typeid
ModuleDebugInfoPrinter() : ModulePass(ID) {
initializeModuleDebugInfoPrinterPass(*PassRegistry::getPassRegistry());
}
bool runOnModule(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
void print(raw_ostream &O, const Module *M) const override;
};
}
char ModuleDebugInfoPrinter::ID = 0;
INITIALIZE_PASS(ModuleDebugInfoPrinter, "module-debuginfo",
"Decodes module-level debug info", false, true)
ModulePass *llvm::createModuleDebugInfoPrinterPass() {
return new ModuleDebugInfoPrinter();
}
bool ModuleDebugInfoPrinter::runOnModule(Module &M) {
Finder.processModule(M);
return false;
}
static void printFile(raw_ostream &O, StringRef Filename, StringRef Directory,
unsigned Line = 0) {
if (Filename.empty())
return;
O << " from ";
if (!Directory.empty())
O << Directory << "/";
O << Filename;
if (Line)
O << ":" << Line;
}
void ModuleDebugInfoPrinter::print(raw_ostream &O, const Module *M) const {
// Printing the nodes directly isn't particularly helpful (since they
// reference other nodes that won't be printed, particularly for the
// filenames), so just print a few useful things.
for (DICompileUnit *CU : Finder.compile_units()) {
O << "Compile unit: ";
if (const char *Lang = dwarf::LanguageString(CU->getSourceLanguage()))
O << Lang;
else
O << "unknown-language(" << CU->getSourceLanguage() << ")";
printFile(O, CU->getFilename(), CU->getDirectory());
O << '\n';
}
for (DISubprogram *S : Finder.subprograms()) {
O << "Subprogram: " << S->getName();
printFile(O, S->getFilename(), S->getDirectory(), S->getLine());
if (!S->getLinkageName().empty())
O << " ('" << S->getLinkageName() << "')";
O << '\n';
}
for (const DIGlobalVariable *GV : Finder.global_variables()) {
O << "Global variable: " << GV->getName();
printFile(O, GV->getFilename(), GV->getDirectory(), GV->getLine());
if (!GV->getLinkageName().empty())
O << " ('" << GV->getLinkageName() << "')";
O << '\n';
}
for (const DIType *T : Finder.types()) {
O << "Type:";
if (!T->getName().empty())
O << ' ' << T->getName();
printFile(O, T->getFilename(), T->getDirectory(), T->getLine());
if (auto *BT = dyn_cast<DIBasicType>(T)) {
O << " ";
if (const char *Encoding =
dwarf::AttributeEncodingString(BT->getEncoding()))
O << Encoding;
else
O << "unknown-encoding(" << BT->getEncoding() << ')';
} else {
O << ' ';
if (const char *Tag = dwarf::TagString(T->getTag()))
O << Tag;
else
O << "unknown-tag(" << T->getTag() << ")";
}
if (auto *CT = dyn_cast<DICompositeType>(T)) {
if (auto *S = CT->getRawIdentifier())
O << " (identifier: '" << S->getString() << "')";
}
O << '\n';
}
}
|
0 | repos/DirectXShaderCompiler/lib | repos/DirectXShaderCompiler/lib/Analysis/Analysis.cpp | //===-- Analysis.cpp ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm-c/Analysis.h"
#include "llvm-c/Initialization.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
using namespace llvm;
// HLSL Change Begin - Windows doesn't support __attribute__((used)) so these methods
// need to be forcibly bound or they could be stripped at build time.
#if defined(_MSC_VER) && (!defined(NDEBUG) || defined(LLVM_ENABLE_DUMP))
#pragma optimize("", off)
void BindDumpMethods() {
// Pin LLVM dump methods.
void (__thiscall Module::*pfnModuleDump)() const = &Module::dump;
(void)pfnModuleDump;
void (__thiscall Type::*pfnTypeDump)() const = &Type::dump;
(void)pfnTypeDump;
void (__thiscall Function::*pfnViewCFGOnly)() const = &Function::viewCFGOnly;
(void)pfnViewCFGOnly;
}
#pragma optimize("", on)
#define HLSL_BIND_DUMP_METHODS BindDumpMethods();
#else
#define HLSL_BIND_DUMP_METHODS
#endif
// HLSL Change End
/// initializeAnalysis - Initialize all passes linked into the Analysis library.
void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeAliasAnalysisAnalysisGroup(Registry);
initializeAliasAnalysisCounterPass(Registry);
initializeAAEvalPass(Registry);
initializeAliasDebuggerPass(Registry);
initializeAliasSetPrinterPass(Registry);
initializeNoAAPass(Registry);
initializeBasicAliasAnalysisPass(Registry);
initializeBlockFrequencyInfoPass(Registry);
initializeBranchProbabilityInfoPass(Registry);
initializeCostModelAnalysisPass(Registry);
initializeCFGViewerPass(Registry);
initializeCFGPrinterPass(Registry);
initializeCFGOnlyViewerPass(Registry);
initializeCFGOnlyPrinterPass(Registry);
initializeCFLAliasAnalysisPass(Registry);
initializeDependenceAnalysisPass(Registry);
initializeDelinearizationPass(Registry);
initializeDivergenceAnalysisPass(Registry);
initializeDominanceFrontierPass(Registry);
initializeDomViewerPass(Registry);
initializeDomPrinterPass(Registry);
initializeDomOnlyViewerPass(Registry);
initializePostDomViewerPass(Registry);
initializeDomOnlyPrinterPass(Registry);
initializePostDomPrinterPass(Registry);
initializePostDomOnlyViewerPass(Registry);
initializePostDomOnlyPrinterPass(Registry);
initializeIVUsersPass(Registry);
initializeInstCountPass(Registry);
initializeIntervalPartitionPass(Registry);
initializeLazyValueInfoPass(Registry);
initializeLibCallAliasAnalysisPass(Registry);
initializeLintPass(Registry);
initializeLoopInfoWrapperPassPass(Registry);
initializeMemDepPrinterPass(Registry);
initializeMemDerefPrinterPass(Registry);
initializeMemoryDependenceAnalysisPass(Registry);
initializeModuleDebugInfoPrinterPass(Registry);
initializePostDominatorTreePass(Registry);
initializeRegionInfoPassPass(Registry);
initializeRegionViewerPass(Registry);
initializeRegionPrinterPass(Registry);
initializeRegionOnlyViewerPass(Registry);
initializeRegionOnlyPrinterPass(Registry);
initializeScalarEvolutionPass(Registry);
initializeScalarEvolutionAliasAnalysisPass(Registry);
initializeTargetTransformInfoWrapperPassPass(Registry);
initializeTypeBasedAliasAnalysisPass(Registry);
initializeScopedNoAliasAAPass(Registry);
HLSL_BIND_DUMP_METHODS // HLSL Change - Force binding dump methods.
}
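// Usage sketch (hypothetical host): register these passes once at startup so
// "opt"-style pass-name lookups can find them.
//
//   PassRegistry &Registry = *PassRegistry::getPassRegistry();
//   initializeAnalysis(Registry);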
void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
initializeAnalysis(*unwrap(R));
}
LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
char **OutMessages) {
raw_ostream *DebugOS = Action != LLVMReturnStatusAction ? &errs() : nullptr;
std::string Messages;
raw_string_ostream MsgsOS(Messages);
LLVMBool Result = verifyModule(*unwrap(M), OutMessages ? &MsgsOS : DebugOS);
// Duplicate the output to stderr.
if (DebugOS && OutMessages)
*DebugOS << MsgsOS.str();
if (Action == LLVMAbortProcessAction && Result)
report_fatal_error("Broken module found, compilation aborted!");
if (OutMessages)
*OutMessages = _strdup(MsgsOS.str().c_str()); // HLSL Change for strdup
return Result;
}
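// Usage sketch (hypothetical C client): verify a module, report the verifier
// diagnostics, and free the returned message.
//
//   char *Msg = NULL;
//   if (LLVMVerifyModule(Mod, LLVMReturnStatusAction, &Msg))
//     fprintf(stderr, "broken module: %s\n", Msg);
//   LLVMDisposeMessage(Msg);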
LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action) {
LLVMBool Result = verifyFunction(
*unwrap<Function>(Fn), Action != LLVMReturnStatusAction ? &errs()
: nullptr);
if (Action == LLVMAbortProcessAction && Result)
report_fatal_error("Broken function found, compilation aborted!");
return Result;
}
void LLVMViewFunctionCFG(LLVMValueRef Fn) {
Function *F = unwrap<Function>(Fn);
F->viewCFG();
}
void LLVMViewFunctionCFGOnly(LLVMValueRef Fn) {
Function *F = unwrap<Function>(Fn);
F->viewCFGOnly();
}
|