
Commit 2be03ab

Author: iclsrc
Merge from 'sycl' to 'sycl-web'
2 parents: 7ca3373 + 2140536

67 files changed: +9891 -395 lines changed


clang/lib/CodeGen/BackendUtil.cpp

Lines changed: 4 additions & 3 deletions
@@ -280,7 +280,8 @@ static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) {
 
 static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
                                          const CodeGenOptions &CodeGenOpts) {
-  TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
+  TargetLibraryInfoImpl *TLII =
+      llvm::driver::createTLII(TargetTriple, CodeGenOpts.getVecLib());
 
   switch (CodeGenOpts.getAltMathLib()) {
   case CodeGenOptions::TestAltMathLibrary:
@@ -584,7 +585,7 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
                                        raw_pwrite_stream *DwoOS) {
   // Add LibraryInfo.
   std::unique_ptr<TargetLibraryInfoImpl> TLII(
-      llvm::driver::createTLII(TargetTriple, CodeGenOpts.getVecLib()));
+      createTLII(TargetTriple, CodeGenOpts));
   CodeGenPasses.add(new TargetLibraryInfoWrapperPass(*TLII));
 
   // Normal mode, emit a .s or .o file by running the code generator. Note,
@@ -920,7 +921,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
   // Register the target library analysis directly and give it a customized
   // preset TLI.
   std::unique_ptr<TargetLibraryInfoImpl> TLII(
-      llvm::driver::createTLII(TargetTriple, CodeGenOpts.getVecLib()));
+      createTLII(TargetTriple, CodeGenOpts));
   FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
 
   // Register all the basic analyses with the managers.
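
Note on the hunks above: TargetLibraryInfoImpl construction is consolidated behind the local createTLII wrapper, which now delegates to llvm::driver::createTLII for the vector-library setup before applying the remaining CodeGen options, and both AddEmitPasses and RunOptimizationPipeline go through that wrapper. A minimal standalone sketch of this delegate-then-customize pattern, using illustrative stand-in types rather than clang's real ones:

#include <memory>

// Stand-ins for TargetLibraryInfoImpl / CodeGenOptions (illustrative only).
struct LibraryInfo {
  int VecLib = 0;
  int AltMathLib = 0;
};
struct Options {
  int VecLib;
  int AltMathLib;
};

// Shared helper, playing the role of llvm::driver::createTLII: it applies
// the vector-library selection shared by the driver and codegen.
LibraryInfo *createBaseLibraryInfo(int VecLib) {
  auto *LI = new LibraryInfo();
  LI->VecLib = VecLib;
  return LI;
}

// Local wrapper, playing the role of the new static createTLII in
// BackendUtil.cpp: delegate first, then layer codegen-specific options on top.
LibraryInfo *createLibraryInfo(const Options &Opts) {
  LibraryInfo *LI = createBaseLibraryInfo(Opts.VecLib);
  LI->AltMathLib = Opts.AltMathLib;
  return LI;
}

int main() {
  Options Opts{/*VecLib=*/1, /*AltMathLib=*/2};
  // Both former call sites now construct through the same wrapper.
  std::unique_ptr<LibraryInfo> TLII(createLibraryInfo(Opts));
}

Routing both call sites through one wrapper keeps any future CodeGen-specific customization of the library info in a single place.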

llvm-spirv/.github/workflows/check-code-style.yml

Lines changed: 2 additions & 43 deletions
@@ -83,14 +83,7 @@ jobs:
         id: run-clang-format
         run: |
           cat diff-to-inspect.txt | /usr/share/clang/clang-format-${{ env.LLVM_VERSION }}/clang-format-diff.py \
-            -p1 -binary clang-format-${{ env.LLVM_VERSION }} > clang-format.patch
-          if [ -s clang-format.patch ]; then
-            echo "clang-format found incorrectly formatted code:"
-            cat clang-format.patch;
-            exit 1;
-          else
-            rm clang-format.patch # to avoid uploading empty file
-          fi
+            -p1 -binary clang-format-${{ env.LLVM_VERSION }}
 
       - name: Run clang-tidy
         # By some reason, GitHub Actions automatically include "success()"
@@ -103,41 +96,7 @@ jobs:
         run: |
           cat diff-to-inspect.txt | /usr/lib/llvm-${{ env.LLVM_VERSION }}/share/clang/clang-tidy-diff.py \
             -p1 -clang-tidy-binary clang-tidy-${{ env.LLVM_VERSION }} -quiet \
-            -path ${{ github.workspace}}/build > clang-tidy.log 2>/dev/null
-          # By some reason, clang-tidy log contains tons of extra empty lines,
-          # that confuse the check below
-          sed -i '/^$/d' clang-tidy.log
-          if [ -s clang-tidy.log ]; then
-            if ! grep -q "No relevant changes found." clang-tidy.log; then
-              # Emit annotations
-              while read -r line; do
-                type="error"
-                if [[ $line == *"warning:"* ]]; then
-                  type="warning"
-                elif [[ $line == *"error:"* ]]; then
-                  type="error"
-                else
-                  continue
-                fi
-
-                absolute_path=$(echo $line | grep -Po "^[\w\d-./]+(?=:)")
-                relative_path=${absolute_path##"${{ github.workspace }}"}
-
-                line_number=$(echo $line | grep -Po "(?<=:)\d+(?=:\d)")
-
-                message=$(echo $line | grep -Po "(?<=${type}: ).*$")
-
-                # see [workflow-commands] for documentation
-                echo "::${type} file=${relative_path},line=${line_number}::${message}"
-              done < clang-tidy.log
-
-              echo "clang-tidy found incorrectly written code:"
-              cat clang-tidy.log
-              exit 1
-            else
-              rm clang-tidy.log # to avoid uploading empty file
-            fi
-          fi
+            -path ${{ github.workspace}}/build
 
       - name: Upload patch with clang-format fixes
         uses: actions/upload-artifact@v2

llvm-spirv/include/LLVMSPIRVExtensions.inc

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@
 #define EXT(X)
 #endif
 
+EXT(SPV_EXT_shader_atomic_float16_add)
 EXT(SPV_EXT_shader_atomic_float_add)
 EXT(SPV_EXT_shader_atomic_float_min_max)
 EXT(SPV_EXT_image_raw10_raw12)

llvm-spirv/lib/SPIRV/LLVMToSPIRVDbgTran.cpp

Lines changed: 3 additions & 0 deletions
@@ -359,6 +359,9 @@ SPIRVEntry *LLVMToSPIRVDbgTran::transDbgEntryImpl(const MDNode *MDN) {
       return transDbgLocalVariable(LV);
     if (const DIGlobalVariable *GV = dyn_cast<DIGlobalVariable>(DIEntry))
       return transDbgGlobalVariable(GV);
+    if (const DIDerivedType *MT = dyn_cast<DIDerivedType>(DIEntry))
+      if (MT->isStaticMember())
+        return transDbgMemberType(MT);
     llvm_unreachable("Unxpected debug info type for variable");
   case dwarf::DW_TAG_formal_parameter:
     return transDbgLocalVariable(cast<DILocalVariable>(DIEntry));
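
The added branch covers debug-info entries for static data members, which LLVM represents as a DIDerivedType carrying the static-member flag rather than as a DIGlobalVariable; previously such entries fell through to the llvm_unreachable. A hedged illustration of the kind of C++ source assumed to produce this metadata shape (the exact output depends on the clang version and debug-info flags):

// Compile with debug info, e.g. "clang++ -g -O0 -c static_member.cpp".
// The in-class constant below is typically described by a DIDerivedType
// member entry with the static-member flag set, not by a DIGlobalVariable,
// which is the case the new dyn_cast<DIDerivedType> branch handles.
struct Config {
  static constexpr int MaxSize = 64;
};

int useConfig() { return Config::MaxSize; }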

llvm-spirv/lib/SPIRV/SPIRVToLLVMDbgTran.cpp

Lines changed: 1 addition & 1 deletion
@@ -802,7 +802,7 @@ DINode *SPIRVToLLVMDbgTran::transTypeEnum(const SPIRVExtInst *DebugInst) {
     UnderlyingType = transDebugInst<DIType>(static_cast<SPIRVExtInst *>(E));
   return getDIBuilder(DebugInst).createEnumerationType(
       Scope, Name, File, LineNo, SizeInBits, AlignInBits, Enumerators,
-      UnderlyingType, 0, "", UnderlyingType);
+      UnderlyingType, 0, "", UnderlyingType);
 }
 }
 

llvm-spirv/lib/SPIRV/SPIRVWriter.cpp

Lines changed: 116 additions & 0 deletions
@@ -4077,6 +4077,122 @@ SPIRVValue *LLVMToSPIRVBase::transIntrinsicInst(IntrinsicInst *II,
                           transValue(II->getArgOperand(0), BB),
                           transValue(II->getArgOperand(1), BB), BB);
   }
+  case Intrinsic::vector_reduce_add:
+  case Intrinsic::vector_reduce_mul:
+  case Intrinsic::vector_reduce_and:
+  case Intrinsic::vector_reduce_or:
+  case Intrinsic::vector_reduce_xor: {
+    Op Op;
+    if (IID == Intrinsic::vector_reduce_add) {
+      Op = OpIAdd;
+    } else if (IID == Intrinsic::vector_reduce_mul) {
+      Op = OpIMul;
+    } else if (IID == Intrinsic::vector_reduce_and) {
+      Op = OpBitwiseAnd;
+    } else if (IID == Intrinsic::vector_reduce_or) {
+      Op = OpBitwiseOr;
+    } else {
+      Op = OpBitwiseXor;
+    }
+    VectorType *VecTy = cast<VectorType>(II->getArgOperand(0)->getType());
+    SPIRVValue *VecSVal = transValue(II->getArgOperand(0), BB);
+    SPIRVTypeInt *ResultSType =
+        BM->addIntegerType(VecTy->getElementType()->getIntegerBitWidth());
+    SPIRVTypeInt *I32STy = BM->addIntegerType(32);
+    unsigned VecSize = VecTy->getElementCount().getFixedValue();
+    SmallVector<SPIRVValue *, 16> Extracts(VecSize);
+    for (unsigned Idx = 0; Idx < VecSize; ++Idx) {
+      Extracts[Idx] = BM->addVectorExtractDynamicInst(
+          VecSVal, BM->addIntegerConstant(I32STy, Idx), BB);
+    }
+    unsigned Counter = VecSize >> 1;
+    while (Counter != 0) {
+      for (unsigned Idx = 0; Idx < Counter; ++Idx) {
+        Extracts[Idx] = BM->addBinaryInst(Op, ResultSType, Extracts[Idx << 1],
+                                          Extracts[(Idx << 1) + 1], BB);
+      }
+      Counter >>= 1;
+    }
+    if ((VecSize & 1) != 0) {
+      Extracts[0] = BM->addBinaryInst(Op, ResultSType, Extracts[0],
+                                      Extracts[VecSize - 1], BB);
+    }
+    return Extracts[0];
+  }
+  case Intrinsic::vector_reduce_fadd:
+  case Intrinsic::vector_reduce_fmul: {
+    Op Op = IID == Intrinsic::vector_reduce_fadd ? OpFAdd : OpFMul;
+    VectorType *VecTy = cast<VectorType>(II->getArgOperand(1)->getType());
+    SPIRVValue *VecSVal = transValue(II->getArgOperand(1), BB);
+    SPIRVValue *StartingSVal = transValue(II->getArgOperand(0), BB);
+    SPIRVTypeInt *I32STy = BM->addIntegerType(32);
+    unsigned VecSize = VecTy->getElementCount().getFixedValue();
+    SmallVector<SPIRVValue *, 16> Extracts(VecSize);
+    for (unsigned Idx = 0; Idx < VecSize; ++Idx) {
+      Extracts[Idx] = BM->addVectorExtractDynamicInst(
+          VecSVal, BM->addIntegerConstant(I32STy, Idx), BB);
+    }
+    SPIRVValue *V = BM->addBinaryInst(Op, StartingSVal->getType(), StartingSVal,
+                                      Extracts[0], BB);
+    for (unsigned Idx = 1; Idx < VecSize; ++Idx) {
+      V = BM->addBinaryInst(Op, StartingSVal->getType(), V, Extracts[Idx], BB);
+    }
+    return V;
+  }
+  case Intrinsic::vector_reduce_smax:
+  case Intrinsic::vector_reduce_smin:
+  case Intrinsic::vector_reduce_umax:
+  case Intrinsic::vector_reduce_umin:
+  case Intrinsic::vector_reduce_fmax:
+  case Intrinsic::vector_reduce_fmin:
+  case Intrinsic::vector_reduce_fmaximum:
+  case Intrinsic::vector_reduce_fminimum: {
+    Op Op;
+    if (IID == Intrinsic::vector_reduce_smax) {
+      Op = OpSGreaterThan;
+    } else if (IID == Intrinsic::vector_reduce_smin) {
+      Op = OpSLessThan;
+    } else if (IID == Intrinsic::vector_reduce_umax) {
+      Op = OpUGreaterThan;
+    } else if (IID == Intrinsic::vector_reduce_umin) {
+      Op = OpULessThan;
+    } else if (IID == Intrinsic::vector_reduce_fmax) {
+      Op = OpFOrdGreaterThan;
+    } else if (IID == Intrinsic::vector_reduce_fmin) {
+      Op = OpFOrdLessThan;
+    } else if (IID == Intrinsic::vector_reduce_fmaximum) {
+      Op = OpFUnordGreaterThan;
+    } else {
+      Op = OpFUnordLessThan;
+    }
+    VectorType *VecTy = cast<VectorType>(II->getArgOperand(0)->getType());
+    SPIRVValue *VecSVal = transValue(II->getArgOperand(0), BB);
+    SPIRVType *BoolSTy = transType(Type::getInt1Ty(II->getContext()));
+    SPIRVTypeInt *I32STy = BM->addIntegerType(32);
+    unsigned VecSize = VecTy->getElementCount().getFixedValue();
+    SmallVector<SPIRVValue *, 16> Extracts(VecSize);
+    for (unsigned Idx = 0; Idx < VecSize; ++Idx) {
+      Extracts[Idx] = BM->addVectorExtractDynamicInst(
+          VecSVal, BM->addIntegerConstant(I32STy, Idx), BB);
+    }
+    unsigned Counter = VecSize >> 1;
+    while (Counter != 0) {
+      for (unsigned Idx = 0; Idx < Counter; ++Idx) {
+        SPIRVValue *Cond = BM->addBinaryInst(Op, BoolSTy, Extracts[Idx << 1],
+                                             Extracts[(Idx << 1) + 1], BB);
+        Extracts[Idx] = BM->addSelectInst(Cond, Extracts[Idx << 1],
+                                          Extracts[(Idx << 1) + 1], BB);
+      }
+      Counter >>= 1;
+    }
+    if ((VecSize & 1) != 0) {
+      SPIRVValue *Cond = BM->addBinaryInst(Op, BoolSTy, Extracts[0],
+                                           Extracts[VecSize - 1], BB);
+      Extracts[0] =
+          BM->addSelectInst(Cond, Extracts[0], Extracts[VecSize - 1], BB);
+    }
+    return Extracts[0];
+  }
   case Intrinsic::memset: {
     // Generally there is no direct mapping of memset to SPIR-V. But it turns
     // out that memset is emitted by Clang for initialization in default
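
The new intrinsic handling above lowers the llvm.vector.reduce.* family by extracting every vector element and combining the extracts explicitly: the integer and min/max cases build a pairwise combination tree (min/max via a comparison followed by a select), while the ordered fadd/fmul cases fold sequentially from the start operand. A minimal standalone sketch of the pairwise scheme on plain integers, assuming a power-of-two element count for simplicity:

#include <cassert>
#include <iostream>
#include <vector>

// Pairwise reduction over already-extracted elements, mirroring the
// Extracts[]/Counter loop emitted above (power-of-two size assumed here).
int reducePairwise(std::vector<int> Extracts, int (*Op)(int, int)) {
  unsigned VecSize = static_cast<unsigned>(Extracts.size());
  assert(VecSize != 0 && (VecSize & (VecSize - 1)) == 0 && "power of two");
  unsigned Counter = VecSize >> 1;
  while (Counter != 0) {
    for (unsigned Idx = 0; Idx < Counter; ++Idx)
      // Combine adjacent pairs; each pass halves the live element count.
      Extracts[Idx] = Op(Extracts[Idx << 1], Extracts[(Idx << 1) + 1]);
    Counter >>= 1;
  }
  return Extracts[0];
}

int main() {
  std::vector<int> V = {1, 2, 3, 4, 5, 6, 7, 8};
  // Integer add reduction (vector_reduce_add analogue): prints 36.
  std::cout << reducePairwise(V, [](int A, int B) { return A + B; }) << "\n";
  // Max reduction via compare-then-select, as the smax/umax/fmax cases do:
  // prints 8.
  std::cout << reducePairwise(V, [](int A, int B) { return A > B ? A : B; })
            << "\n";
}

For odd element counts the emitted sequence additionally folds the final extract into the running result, as the (VecSize & 1) check in the diff shows.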

llvm-spirv/lib/SPIRV/libSPIRV/SPIRVEnum.h

Lines changed: 2 additions & 0 deletions
@@ -215,6 +215,8 @@ template <> inline void SPIRVMap<SPIRVCapabilityKind, SPIRVCapVec>::init() {
                {internal::CapabilityJointMatrixINTEL});
   ADD_VEC_INIT(internal::CapabilityJointMatrixPackedInt4ComponentTypeINTEL,
                {internal::CapabilityJointMatrixINTEL});
+  ADD_VEC_INIT(internal::CapabilityCooperativeMatrixPrefetchINTEL,
+               {CapabilityCooperativeMatrixKHR});
 }
 
 template <> inline void SPIRVMap<SPIRVExecutionModelKind, SPIRVCapVec>::init() {

llvm-spirv/lib/SPIRV/libSPIRV/SPIRVInstruction.h

Lines changed: 78 additions & 5 deletions
@@ -2746,16 +2746,22 @@ class SPIRVAtomicStoreInst : public SPIRVAtomicInstBase {
 class SPIRVAtomicFAddEXTInst : public SPIRVAtomicInstBase {
 public:
   std::optional<ExtensionID> getRequiredExtension() const override {
+    assert(hasType());
+    if (getType()->isTypeFloat(16))
+      return ExtensionID::SPV_EXT_shader_atomic_float16_add;
     return ExtensionID::SPV_EXT_shader_atomic_float_add;
   }
 
   SPIRVCapVec getRequiredCapability() const override {
     assert(hasType());
+    if (getType()->isTypeFloat(16))
+      return {CapabilityAtomicFloat16AddEXT};
     if (getType()->isTypeFloat(32))
       return {CapabilityAtomicFloat32AddEXT};
-    assert(getType()->isTypeFloat(64) &&
-           "AtomicFAddEXT can only be generated for f32 or f64 types");
-    return {CapabilityAtomicFloat64AddEXT};
+    if (getType()->isTypeFloat(64))
+      return {CapabilityAtomicFloat64AddEXT};
+    llvm_unreachable(
+        "AtomicFAddEXT can only be generated for f16, f32, f64 types");
   }
 };
 
@@ -3288,10 +3294,17 @@ template <Op OC>
 class SPIRVBfloat16ConversionINTELInstBase : public SPIRVUnaryInst<OC> {
 protected:
   SPIRVCapVec getRequiredCapability() const override {
+    SPIRVType *ResCompTy = this->getType();
+    if (ResCompTy->isTypeCooperativeMatrixKHR())
+      return getVec(internal::CapabilityBfloat16ConversionINTEL,
+                    internal::CapabilityJointMatrixBF16ComponentTypeINTEL);
     return getVec(internal::CapabilityBfloat16ConversionINTEL);
   }
 
   std::optional<ExtensionID> getRequiredExtension() const override {
+    SPIRVType *ResCompTy = this->getType();
+    if (ResCompTy->isTypeCooperativeMatrixKHR())
+      this->getModule()->addExtension(ExtensionID::SPV_INTEL_joint_matrix);
     return ExtensionID::SPV_INTEL_bfloat16_conversion;
   }
 
@@ -3320,8 +3333,25 @@ class SPIRVBfloat16ConversionINTELInstBase : public SPIRVUnaryInst<OC> {
     }
 
     auto InstName = OpCodeNameMap::map(OC);
-    SPIRVErrorLog &SPVErrLog = this->getModule()->getErrorLog();
+    auto *Module = this->getModule();
+    SPIRVErrorLog &SPVErrLog = Module->getErrorLog();
 
+    // Cooperative matrix type is allowed as input/output of the instruction
+    // if SPV_INTEL_joint_matrix is enabled
+    if (ResCompTy->isTypeCooperativeMatrixKHR()) {
+      SPVErrLog.checkError(
+          Module->isAllowedToUseExtension(ExtensionID::SPV_INTEL_joint_matrix),
+          SPIRVEC_InvalidInstruction,
+          InstName + "\nCan be used with "
+                     "cooperative matrices only when SPV_INTEL_joint_matrix is "
+                     "enabled\n");
+      assert(InCompTy->isTypeCooperativeMatrixKHR() &&
+             "Input must also be a cooperative matrix");
+      ResCompTy = static_cast<SPIRVTypeCooperativeMatrixKHR *>(ResCompTy)
+                      ->getCompType();
+      InCompTy =
+          static_cast<SPIRVTypeCooperativeMatrixKHR *>(InCompTy)->getCompType();
+    }
     if (OC == internal::OpConvertFToBF16INTEL) {
       SPVErrLog.checkError(
           ResCompTy->isTypeInt(16), SPIRVEC_InvalidInstruction,
@@ -3396,6 +3426,24 @@ class SPIRVJointMatrixINTELWorkItemInst : public SPIRVJointMatrixINTELInstBase {
 _SPIRV_OP(JointMatrixGetElementCoord, true, 5)
 #undef _SPIRV_OP
 
+class SPIRVCooperativeMatrixPrefetchINTELInstBase
+    : public SPIRVInstTemplateBase {
+protected:
+  std::optional<ExtensionID> getRequiredExtension() const override {
+    return ExtensionID::SPV_INTEL_joint_matrix;
+  }
+  SPIRVCapVec getRequiredCapability() const override {
+    return getVec(internal::CapabilityCooperativeMatrixPrefetchINTEL);
+  }
+};
+
+#define _SPIRV_OP(x, ...)                                                      \
+  typedef SPIRVInstTemplate<SPIRVCooperativeMatrixPrefetchINTELInstBase,       \
+                            internal::Op##x##INTEL, __VA_ARGS__>               \
+      SPIRV##x##INTEL;
+_SPIRV_OP(CooperativeMatrixPrefetch, false, 8, true, 5)
+#undef _SPIRV_OP
+
 class SPIRVCooperativeMatrixKHRInstBase : public SPIRVInstTemplateBase {
 protected:
   std::optional<ExtensionID> getRequiredExtension() const override {
@@ -3655,10 +3703,17 @@ template <Op OC>
 class SPIRVTensorFloat32RoundingINTELInstBase : public SPIRVUnaryInst<OC> {
 protected:
   SPIRVCapVec getRequiredCapability() const override {
+    SPIRVType *ResCompTy = this->getType();
+    if (ResCompTy->isTypeCooperativeMatrixKHR())
+      return getVec(internal::CapabilityTensorFloat32RoundingINTEL,
+                    internal::CapabilityJointMatrixTF32ComponentTypeINTEL);
     return getVec(internal::CapabilityTensorFloat32RoundingINTEL);
   }
 
   std::optional<ExtensionID> getRequiredExtension() const override {
+    SPIRVType *ResCompTy = this->getType();
+    if (ResCompTy->isTypeCooperativeMatrixKHR())
+      this->getModule()->addExtension(ExtensionID::SPV_INTEL_joint_matrix);
     return ExtensionID::SPV_INTEL_tensor_float32_conversion;
   }
 
@@ -3687,7 +3742,25 @@ class SPIRVTensorFloat32RoundingINTELInstBase : public SPIRVUnaryInst<OC> {
     }
 
     auto InstName = OpCodeNameMap::map(OC);
-    SPIRVErrorLog &SPVErrLog = this->getModule()->getErrorLog();
+    auto *Module = this->getModule();
+    SPIRVErrorLog &SPVErrLog = Module->getErrorLog();
+
+    // Cooperative matrix type is allowed as input/output of the instruction
+    // if SPV_INTEL_joint_matrix is enabled
+    if (ResCompTy->isTypeCooperativeMatrixKHR()) {
+      SPVErrLog.checkError(
+          Module->isAllowedToUseExtension(ExtensionID::SPV_INTEL_joint_matrix),
+          SPIRVEC_InvalidInstruction,
+          InstName + "\nCan be used with "
+                     "cooperative matrices only when SPV_INTEL_joint_matrix is "
+                     "enabled\n");
+      assert(InCompTy->isTypeCooperativeMatrixKHR() &&
+             "Input must also be a cooperative matrix");
+      ResCompTy = static_cast<SPIRVTypeCooperativeMatrixKHR *>(ResCompTy)
+                      ->getCompType();
+      InCompTy =
+          static_cast<SPIRVTypeCooperativeMatrixKHR *>(InCompTy)->getCompType();
+    }
 
     SPVErrLog.checkError(
         ResCompTy->isTypeFloat(32), SPIRVEC_InvalidInstruction,
