
Commit 2b47103

Firestar99 authored and eddyb committed
update rspirv 0.11 to 0.12, bulk adjustments
1 parent 8b753aa commit 2b47103

20 files changed: +145 −148 lines changed

Cargo.lock

Lines changed: 5 additions & 31 deletions
(Generated file; diff not rendered.)

crates/rustc_codegen_spirv-types/Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -8,6 +8,6 @@ license.workspace = true
 repository.workspace = true
 
 [dependencies]
-rspirv = "0.11"
+rspirv = "0.12"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"

crates/rustc_codegen_spirv/Cargo.toml

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@ regex = { version = "1", features = ["perf"] }
 ar = "0.9.0"
 either = "1.8.0"
 indexmap = "1.6.0"
-rspirv = "0.11"
+rspirv = "0.12"
 rustc_codegen_spirv-types.workspace = true
 rustc-demangle = "0.1.21"
 sanitize-filename = "0.4"

crates/rustc_codegen_spirv/src/abi.rs

Lines changed: 26 additions & 4 deletions
@@ -4,7 +4,7 @@
 use crate::attr::{AggregatedSpirvAttributes, IntrinsicType};
 use crate::codegen_cx::CodegenCx;
 use crate::spirv_type::SpirvType;
-use rspirv::spirv::{StorageClass, Word};
+use rspirv::spirv::{Dim, ImageFormat, StorageClass, Word};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_errors::ErrorGuaranteed;
 use rustc_index::Idx;
@@ -653,7 +653,7 @@ fn trans_aggregate<'tcx>(cx: &CodegenCx<'tcx>, span: Span, ty: TyAndLayout<'tcx>
         // spir-v doesn't support zero-sized arrays
         create_zst(cx, span, ty)
     } else {
-        let count_const = cx.constant_u32(span, count as u32);
+        let count_const = cx.constant_bit32(span, count as u32);
         let element_spv = cx.lookup_type(element_type);
         let stride_spv = element_spv
             .sizeof(cx)
@@ -862,13 +862,35 @@ fn trans_intrinsic_type<'tcx>(
     // let image_format: spirv::ImageFormat =
     //     type_from_variant_discriminant(cx, args.const_at(6));
 
-    fn const_int_value<'tcx, P: FromPrimitive>(
+    trait FromU128Const: Sized {
+        fn from_u128_const(n: u128) -> Option<Self>;
+    }
+
+    impl FromU128Const for u32 {
+        fn from_u128_const(n: u128) -> Option<Self> {
+            u32::from_u128(n)
+        }
+    }
+
+    impl FromU128Const for Dim {
+        fn from_u128_const(n: u128) -> Option<Self> {
+            Dim::from_u32(u32::from_u128(n)?)
+        }
+    }
+
+    impl FromU128Const for ImageFormat {
+        fn from_u128_const(n: u128) -> Option<Self> {
+            ImageFormat::from_u32(u32::from_u128(n)?)
+        }
+    }
+
+    fn const_int_value<'tcx, P: FromU128Const>(
         cx: &CodegenCx<'tcx>,
         const_: Const<'tcx>,
     ) -> Result<P, ErrorGuaranteed> {
         assert!(const_.ty().is_integral());
         let value = const_.eval_bits(cx.tcx, ParamEnv::reveal_all());
-        match P::from_u128(value) {
+        match P::from_u128_const(value) {
             Some(v) => Ok(v),
             None => Err(cx
                 .tcx

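The new FromU128Const helper above replaces the previous num-traits FromPrimitive bound: the rspirv enums are now converted through their own from_u32 constructors (confirmed by the diff), with u32 still going through num-traits' from_u128. Below is a minimal standalone sketch of the same pattern outside the codegen context, not part of the commit; it assumes rspirv = "0.12" and num-traits as dependencies.

// Standalone sketch of the FromU128Const pattern above (illustrative only).
use num_traits::FromPrimitive;
use rspirv::spirv::Dim;

trait FromU128Const: Sized {
    // Convert a constant evaluated as u128 into the target type, if it fits.
    fn from_u128_const(n: u128) -> Option<Self>;
}

impl FromU128Const for u32 {
    fn from_u128_const(n: u128) -> Option<Self> {
        u32::from_u128(n)
    }
}

impl FromU128Const for Dim {
    fn from_u128_const(n: u128) -> Option<Self> {
        // rspirv 0.12 exposes from_u32 on its generated enums, as used above.
        Dim::from_u32(u32::from_u128(n)?)
    }
}

fn main() {
    // 1 is Dim2D in the SPIR-V Dim enumeration.
    assert!(matches!(Dim::from_u128_const(1), Some(Dim::Dim2D)));
    // Values that do not fit in u32 are rejected.
    assert_eq!(u32::from_u128_const(u128::from(u32::MAX) + 1), None);
}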
crates/rustc_codegen_spirv/src/builder/builder_methods.rs

Lines changed: 19 additions & 19 deletions
@@ -175,7 +175,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                     | MemorySemantics::SEQUENTIALLY_CONSISTENT
             }
         };
-        let semantics = self.constant_u32(self.span(), semantics.bits());
+        let semantics = self.constant_bit32(self.span(), semantics.bits());
         if invalid_seq_cst {
             self.zombie(
                 semantics.def(self),
@@ -196,10 +196,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 .constant_u16(self.span(), memset_fill_u16(fill_byte))
                 .def(self),
             32 => self
-                .constant_u32(self.span(), memset_fill_u32(fill_byte))
+                .constant_bit32(self.span(), memset_fill_u32(fill_byte))
                 .def(self),
             64 => self
-                .constant_u64(self.span(), memset_fill_u64(fill_byte))
+                .constant_bit64(self.span(), memset_fill_u64(fill_byte))
                 .def(self),
             _ => self.fatal(format!(
                 "memset on integer width {width} not implemented yet"
@@ -314,7 +314,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             self.store(pat, ptr, Align::from_bytes(0).unwrap());
         } else {
             for index in 0..count {
-                let const_index = self.constant_u32(self.span(), index as u32);
+                let const_index = self.constant_bit32(self.span(), index as u32);
                 let gep_ptr = self.gep(pat.ty, ptr, &[const_index]);
                 self.store(pat, gep_ptr, Align::from_bytes(0).unwrap());
             }
@@ -431,7 +431,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         } else {
             let indices = indices
                 .into_iter()
-                .map(|idx| self.constant_u32(self.span(), idx).def(self))
+                .map(|idx| self.constant_bit32(self.span(), idx).def(self))
                 .collect::<Vec<_>>();
             self.emit()
                 .in_bounds_access_chain(leaf_ptr_ty, None, ptr.def(self), indices)
@@ -614,7 +614,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         };
         let indices = base_indices
             .into_iter()
-            .map(|idx| self.constant_u32(self.span(), idx).def(self))
+            .map(|idx| self.constant_bit32(self.span(), idx).def(self))
             .chain(indices)
             .collect();
         return self.emit_access_chain(
@@ -1106,9 +1106,9 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
                ))
            } else if signed {
                // this cast chain can probably be collapsed, but, whatever, be safe
-               Operand::LiteralInt32(v as u8 as i8 as i32 as u32)
+               Operand::LiteralBit32(v as u8 as i8 as i32 as u32)
            } else {
-               Operand::LiteralInt32(v as u8 as u32)
+               Operand::LiteralBit32(v as u8 as u32)
            }
        }
        fn construct_16(self_: &Builder<'_, '_>, signed: bool, v: u128) -> Operand {
@@ -1117,9 +1117,9 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
                    "Switches to values above u16::MAX not supported: {v:?}"
                ))
            } else if signed {
-               Operand::LiteralInt32(v as u16 as i16 as i32 as u32)
+               Operand::LiteralBit32(v as u16 as i16 as i32 as u32)
            } else {
-               Operand::LiteralInt32(v as u16 as u32)
+               Operand::LiteralBit32(v as u16 as u32)
            }
        }
        fn construct_32(self_: &Builder<'_, '_>, _signed: bool, v: u128) -> Operand {
@@ -1128,7 +1128,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
                    "Switches to values above u32::MAX not supported: {v:?}"
                ))
            } else {
-               Operand::LiteralInt32(v as u32)
+               Operand::LiteralBit32(v as u32)
            }
        }
        fn construct_64(self_: &Builder<'_, '_>, _signed: bool, v: u128) -> Operand {
@@ -1137,7 +1137,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
                    "Switches to values above u64::MAX not supported: {v:?}"
                ))
            } else {
-               Operand::LiteralInt64(v as u64)
+               Operand::LiteralBit64(v as u64)
            }
        }
        // pass in signed into the closure to be able to unify closure types
@@ -1478,7 +1478,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         let (ptr, access_ty) = self.adjust_pointer_for_typed_access(ptr, ty);
 
         // TODO: Default to device scope
-        let memory = self.constant_u32(self.span(), Scope::Device as u32);
+        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
         let semantics = self.ordering_to_semantics_def(order);
         let result = self
             .emit()
@@ -1611,7 +1611,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         let val = self.bitcast(val, access_ty);
 
         // TODO: Default to device scope
-        let memory = self.constant_u32(self.span(), Scope::Device as u32);
+        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
         let semantics = self.ordering_to_semantics_def(order);
         self.validate_atomic(val.ty, ptr.def(self));
         self.emit()
@@ -1944,7 +1944,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
     ) {
         let indices = indices
             .into_iter()
-            .map(|idx| self.constant_u32(self.span(), idx).def(self))
+            .map(|idx| self.constant_bit32(self.span(), idx).def(self))
             .collect::<Vec<_>>();
         self.emit()
             .in_bounds_access_chain(dest_ty, None, ptr.def(self), indices)
@@ -2495,7 +2495,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
 
         self.validate_atomic(access_ty, dst.def(self));
         // TODO: Default to device scope
-        let memory = self.constant_u32(self.span(), Scope::Device as u32);
+        let memory = self.constant_bit32(self.span(), Scope::Device as u32);
         let semantics_equal = self.ordering_to_semantics_def(order);
         let semantics_unequal = self.ordering_to_semantics_def(failure_order);
         // Note: OpAtomicCompareExchangeWeak is deprecated, and has the same semantics
@@ -2535,7 +2535,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         self.validate_atomic(access_ty, dst.def(self));
         // TODO: Default to device scope
         let memory = self
-            .constant_u32(self.span(), Scope::Device as u32)
+            .constant_bit32(self.span(), Scope::Device as u32)
             .def(self);
         let semantics = self.ordering_to_semantics_def(order).def(self);
         use AtomicRmwBinOp::*;
@@ -2631,7 +2631,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
         // Ignore sync scope (it only has "single thread" and "cross thread")
         // TODO: Default to device scope
         let memory = self
-            .constant_u32(self.span(), Scope::Device as u32)
+            .constant_bit32(self.span(), Scope::Device as u32)
             .def(self);
         let semantics = self.ordering_to_semantics_def(order).def(self);
         self.emit().memory_barrier(memory, semantics).unwrap();
@@ -2915,7 +2915,7 @@ impl<'a, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'tcx> {
 
         // HACK(eddyb) avoid the logic below that assumes only ID operands
         if inst.class.opcode == Op::CompositeExtract {
-            if let (Some(r), &[Operand::IdRef(x), Operand::LiteralInt32(i)]) =
+            if let (Some(r), &[Operand::IdRef(x), Operand::LiteralBit32(i)]) =
                 (inst.result_id, &inst.operands[..])
             {
                 return Some(Inst::CompositeExtract(r, x, i));

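Most of the changes above are mechanical renames: rspirv 0.12 replaces the Operand::LiteralInt32/LiteralInt64 variants with the bit-pattern variants Operand::LiteralBit32/LiteralBit64, and this backend's constant_u32/constant_u64 helpers were renamed to constant_bit32/constant_bit64 to match. Below is a minimal sketch of the operand side, not taken from the commit, assuming only rspirv = "0.12" as a dependency; the float example reflects the apparent intent of the "Bit" naming and is an assumption, not something the diff shows.

// Sketch (illustrative only): constructing literal operands with the
// rspirv 0.12 names used throughout the diff above.
use rspirv::dr::Operand;

fn main() {
    // 32- and 64-bit integer literals now use the LiteralBit* variants.
    let int32 = Operand::LiteralBit32(42);
    let int64 = Operand::LiteralBit64(u64::MAX);
    // Presumably the same variant carries float bit patterns as well,
    // which is why the name says "Bit" rather than "Int".
    let float32 = Operand::LiteralBit32(1.5f32.to_bits());
    println!("{int32:?} {int64:?} {float32:?}");
}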
crates/rustc_codegen_spirv/src/builder/byte_addressable_buffer.rs

Lines changed: 4 additions & 4 deletions
@@ -31,7 +31,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         constant_offset: u32,
     ) -> SpirvValue {
         let actual_index = if constant_offset != 0 {
-            let const_offset_val = self.constant_u32(DUMMY_SP, constant_offset);
+            let const_offset_val = self.constant_bit32(DUMMY_SP, constant_offset);
             self.add(dynamic_index, const_offset_val)
         } else {
             dynamic_index
@@ -199,7 +199,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // Note that the &[u32] gets split into two arguments - pointer, length
         let array = args[0];
         let byte_index = args[2];
-        let two = self.constant_u32(DUMMY_SP, 2);
+        let two = self.constant_bit32(DUMMY_SP, 2);
         let word_index = self.lshr(byte_index, two);
         self.recurse_load_type(result_type, result_type, array, word_index, 0)
     }
@@ -223,7 +223,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         value: SpirvValue,
     ) -> Result<(), ErrorGuaranteed> {
         let actual_index = if constant_offset != 0 {
-            let const_offset_val = self.constant_u32(DUMMY_SP, constant_offset);
+            let const_offset_val = self.constant_bit32(DUMMY_SP, constant_offset);
             self.add(dynamic_index, const_offset_val)
         } else {
             dynamic_index
@@ -367,7 +367,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         // Note that the &[u32] gets split into two arguments - pointer, length
         let array = args[0];
         let byte_index = args[2];
-        let two = self.constant_u32(DUMMY_SP, 2);
+        let two = self.constant_bit32(DUMMY_SP, 2);
         let word_index = self.lshr(byte_index, two);
         if is_pair {
             let value_one = args[3];

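The constant 2 built with constant_bit32(DUMMY_SP, 2) above feeds a logical shift right that turns a byte index into an index into the underlying &[u32] word array. The addressing math itself, separated from the SPIR-V builder, is just a divide-by-word-size; the sketch below is plain Rust and purely illustrative.

// Illustrative only: the byte-to-word addressing used above, without the builder.
// A byte offset into a &[u32] buffer maps to word index byte_index >> 2,
// i.e. division by the 4-byte word size.
fn word_index(byte_index: u32) -> u32 {
    byte_index >> 2
}

fn main() {
    assert_eq!(word_index(0), 0);
    assert_eq!(word_index(4), 1);
    assert_eq!(word_index(10), 2); // non-aligned offsets round down
}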
crates/rustc_codegen_spirv/src/builder/intrinsics.rs

Lines changed: 18 additions & 18 deletions
@@ -45,12 +45,12 @@ impl Builder<'_, '_> {
         let int_ty = SpirvType::Integer(width, false).def(self.span(), self);
         let (mask_sign, mask_value) = match width {
             32 => (
-                self.constant_u32(self.span(), 1 << 31),
-                self.constant_u32(self.span(), u32::MAX >> 1),
+                self.constant_bit32(self.span(), 1 << 31),
+                self.constant_bit32(self.span(), u32::MAX >> 1),
             ),
             64 => (
-                self.constant_u64(self.span(), 1 << 63),
-                self.constant_u64(self.span(), u64::MAX >> 1),
+                self.constant_bit64(self.span(), 1 << 63),
+                self.constant_bit64(self.span(), u64::MAX >> 1),
             ),
             _ => bug!("copysign must have width 32 or 64, not {}", width),
         };
@@ -272,10 +272,10 @@ impl<'a, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'tcx> {
                 self.or(tmp1, tmp2)
             }
             32 => {
-                let offset8 = self.constant_u32(self.span(), 8);
-                let offset24 = self.constant_u32(self.span(), 24);
-                let mask16 = self.constant_u32(self.span(), 0xFF00);
-                let mask24 = self.constant_u32(self.span(), 0xFF0000);
+                let offset8 = self.constant_bit32(self.span(), 8);
+                let offset24 = self.constant_bit32(self.span(), 24);
+                let mask16 = self.constant_bit32(self.span(), 0xFF00);
+                let mask24 = self.constant_bit32(self.span(), 0xFF0000);
                 let tmp4 = self.shl(arg, offset24);
                 let tmp3 = self.shl(arg, offset8);
                 let tmp2 = self.lshr(arg, offset8);
@@ -287,16 +287,16 @@ impl<'a, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'tcx> {
                 self.or(res1, res2)
             }
             64 => {
-                let offset8 = self.constant_u64(self.span(), 8);
-                let offset24 = self.constant_u64(self.span(), 24);
-                let offset40 = self.constant_u64(self.span(), 40);
-                let offset56 = self.constant_u64(self.span(), 56);
-                let mask16 = self.constant_u64(self.span(), 0xff00);
-                let mask24 = self.constant_u64(self.span(), 0xff0000);
-                let mask32 = self.constant_u64(self.span(), 0xff000000);
-                let mask40 = self.constant_u64(self.span(), 0xff00000000);
-                let mask48 = self.constant_u64(self.span(), 0xff0000000000);
-                let mask56 = self.constant_u64(self.span(), 0xff000000000000);
+                let offset8 = self.constant_bit64(self.span(), 8);
+                let offset24 = self.constant_bit64(self.span(), 24);
+                let offset40 = self.constant_bit64(self.span(), 40);
+                let offset56 = self.constant_bit64(self.span(), 56);
+                let mask16 = self.constant_bit64(self.span(), 0xff00);
+                let mask24 = self.constant_bit64(self.span(), 0xff0000);
+                let mask32 = self.constant_bit64(self.span(), 0xff000000);
+                let mask40 = self.constant_bit64(self.span(), 0xff00000000);
+                let mask48 = self.constant_bit64(self.span(), 0xff0000000000);
+                let mask56 = self.constant_bit64(self.span(), 0xff000000000000);
                 let tmp8 = self.shl(arg, offset56);
                 let tmp7 = self.shl(arg, offset40);
                 let tmp6 = self.shl(arg, offset24);

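The copysign hunk above only swaps the constant constructors; the constants themselves are the usual sign/magnitude masks: 1 << 31 isolates the sign bit of an f32 and u32::MAX >> 1 its magnitude bits. The plain-Rust sketch below shows the same bit manipulation outside the builder; it is illustrative and not part of the commit.

// Illustrative only: copysign on f32 via the sign and magnitude masks that
// the intrinsic lowering above builds as SPIR-V constants.
fn copysign_f32(value: f32, sign_source: f32) -> f32 {
    let mask_sign: u32 = 1 << 31;        // sign bit
    let mask_value: u32 = u32::MAX >> 1; // exponent + mantissa bits
    let bits = (value.to_bits() & mask_value) | (sign_source.to_bits() & mask_sign);
    f32::from_bits(bits)
}

fn main() {
    assert_eq!(copysign_f32(3.0, -1.0), -3.0);
    assert_eq!(copysign_f32(-3.0, 1.0), 3.0);
}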