Commit 5292554

1 parent da58c05 commit 5292554

6 files changed: +101 −190 lines changed

compiler/rustc_codegen_ssa/src/mir/operand.rs

Lines changed: 8 additions & 3 deletions
@@ -346,6 +346,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
         let val = if field.is_zst() {
             OperandValue::ZeroSized
+        } else if let BackendRepr::SimdVector { .. } = self.layout.backend_repr {
+            // codegen_transmute_operand doesn't support SIMD, but since the previous
+            // check handled ZSTs, the only possible field access into something SIMD
+            // is to the `non_1zst_field` that's the same SIMD. (Other things, even
+            // just padding, would change the wrapper's representation type.)
+            assert_eq!(field.size, self.layout.size);
+            self.val
         } else if field.size == self.layout.size {
             assert_eq!(offset.bytes(), 0);
             fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
@@ -606,10 +613,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, Result<V, abi::Scalar>> {
         };
 
         let mut update = |tgt: &mut Result<V, abi::Scalar>, src, from_scalar| {
-            let from_bty = bx.cx().type_from_scalar(from_scalar);
             let to_scalar = tgt.unwrap_err();
-            let to_bty = bx.cx().type_from_scalar(to_scalar);
-            let imm = transmute_immediate(bx, src, from_scalar, from_bty, to_scalar, to_bty);
+            let imm = transmute_immediate(bx, src, from_scalar, to_scalar);
             *tgt = Ok(imm);
         };

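As an aside (an illustrative sketch, not part of the commit — the type names here are hypothetical): the new SimdVector arm matters for transparent wrappers around SIMD types, where the only non-1-ZST field has exactly the wrapper's own size and representation, so the projection can reuse self.val directly.

#![feature(repr_simd)]

#[repr(simd)]
pub struct F32x4([f32; 4]);

// The single non-1-ZST field of this transparent wrapper is the same SIMD
// vector as the wrapper itself, so projecting to `.0` hits the new branch
// and the `assert_eq!(field.size, self.layout.size)` above holds.
#[repr(transparent)]
pub struct Wrapped(pub F32x4);

pub fn project(w: Wrapped) -> F32x4 {
    w.0
}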
compiler/rustc_codegen_ssa/src/mir/rvalue.rs

Lines changed: 73 additions & 145 deletions
@@ -1,18 +1,16 @@
-use std::assert_matches::assert_matches;
-
 use rustc_abi::{self as abi, FIRST_VARIANT};
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
-use rustc_middle::{bug, mir, span_bug};
+use rustc_middle::{bug, mir};
 use rustc_session::config::OptLevel;
 use rustc_span::{DUMMY_SP, Span};
 use tracing::{debug, instrument};
 
 use super::operand::{OperandRef, OperandValue};
 use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};
-use crate::common::IntPredicate;
+use crate::common::{IntPredicate, TypeKind};
 use crate::traits::*;
 use crate::{MemFlags, base};

@@ -200,31 +198,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         assert!(src.layout.is_sized());
         assert!(dst.layout.is_sized());
 
-        if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
-            val.store(bx, dst);
-            return;
-        }
-
-        match src.val {
-            OperandValue::Ref(..) | OperandValue::ZeroSized => {
-                span_bug!(
-                    self.mir.span,
-                    "Operand path should have handled transmute \
-                     from {src:?} to place {dst:?}"
-                );
-            }
-            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
-                // When we have immediate(s), the alignment of the source is irrelevant,
-                // so we can store them using the destination's alignment.
-                src.val.store(bx, dst.val.with_type(src.layout));
-            }
+        if src.layout.size == dst.layout.size {
+            // Since in this path we have a place anyway, we can store or copy to it,
+            // making sure we use the destination place's alignment even if the
+            // source would normally have a higher one.
+            src.val.store(bx, dst.val.with_type(src.layout));
+        } else if src.layout.is_uninhabited() {
+            bx.unreachable()
+        } else {
+            // Since this is known statically and the input could have existed
+            // without already having hit UB, might as well trap for it, even
+            // though it's UB so we *could* also unreachable it.
+            bx.abort();
         }
     }
 
     /// Attempts to transmute an `OperandValue` to another `OperandValue`.
     ///
     /// Returns `None` for cases that can't work in that framework, such as for
-    /// `Immediate`->`Ref` that needs an `alloc` to get the location.
+    /// `Immediate`->`Ref` that needs an `alloca` to get the location.
     pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
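A hedged sketch of how the size-mismatch arm above can be reached (an assumption for illustration; transmute_unchecked is a nightly intrinsic that defers the size check to runtime UB rather than rejecting the program):

#![feature(core_intrinsics)]

// In generic code the sizes can't be compared until monomorphization, so a
// bad instantiation like `reinterpret::<u32, u64>` can reach codegen with
// src.layout.size != dst.layout.size and now lowers to bx.abort().
pub unsafe fn reinterpret<T, U>(x: T) -> U {
    std::intrinsics::transmute_unchecked(x)
}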
@@ -247,69 +239,34 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             return Some(OperandValue::poison(bx, cast));
         }
 
-        let operand_kind = self.value_kind(operand.layout);
-        let cast_kind = self.value_kind(cast);
-
-        match operand.val {
-            OperandValue::Ref(source_place_val) => {
+        Some(match (operand.val, operand.layout.backend_repr, cast.backend_repr) {
+            _ if cast.is_zst() => OperandValue::ZeroSized,
+            (OperandValue::ZeroSized, _, _) => bug!(),
+            (
+                OperandValue::Ref(source_place_val),
+                abi::BackendRepr::Memory { .. },
+                abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _),
+            ) => {
                 assert_eq!(source_place_val.llextra, None);
-                assert_matches!(operand_kind, OperandValueKind::Ref);
                 // The existing alignment is part of `source_place_val`,
                 // so that alignment will be used, not `cast`'s.
-                Some(bx.load_operand(source_place_val.with_type(cast)).val)
+                bx.load_operand(source_place_val.with_type(cast)).val
             }
-            OperandValue::ZeroSized => {
-                let OperandValueKind::ZeroSized = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
-                };
-                if let OperandValueKind::ZeroSized = cast_kind {
-                    Some(OperandValue::ZeroSized)
-                } else {
-                    None
-                }
-            }
-            OperandValue::Immediate(imm) => {
-                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
-                };
-                if let OperandValueKind::Immediate(to_scalar) = cast_kind
-                    && from_scalar.size(self.cx) == to_scalar.size(self.cx)
-                {
-                    let from_backend_ty = bx.backend_type(operand.layout);
-                    let to_backend_ty = bx.backend_type(cast);
-                    Some(OperandValue::Immediate(transmute_immediate(
-                        bx,
-                        imm,
-                        from_scalar,
-                        from_backend_ty,
-                        to_scalar,
-                        to_backend_ty,
-                    )))
-                } else {
-                    None
-                }
-            }
-            OperandValue::Pair(imm_a, imm_b) => {
-                let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
-                };
-                if let OperandValueKind::Pair(out_a, out_b) = cast_kind
-                    && in_a.size(self.cx) == out_a.size(self.cx)
-                    && in_b.size(self.cx) == out_b.size(self.cx)
-                {
-                    let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
-                    let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
-                    let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
-                    let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
-                    Some(OperandValue::Pair(
-                        transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
-                        transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
-                    ))
-                } else {
-                    None
-                }
-            }
-        }
+            (
+                OperandValue::Immediate(imm),
+                abi::BackendRepr::Scalar(from_scalar),
+                abi::BackendRepr::Scalar(to_scalar),
+            ) => OperandValue::Immediate(transmute_immediate(bx, imm, from_scalar, to_scalar)),
+            (
+                OperandValue::Pair(imm_a, imm_b),
+                abi::BackendRepr::ScalarPair(in_a, in_b),
+                abi::BackendRepr::ScalarPair(out_a, out_b),
+            ) => OperandValue::Pair(
+                transmute_immediate(bx, imm_a, in_a, out_a),
+                transmute_immediate(bx, imm_b, in_b, out_b),
+            ),
+            _ => return None,
+        })
     }
 
     /// Cast one of the immediates from an [`OperandValue::Immediate`]
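For intuition (a sketch, not from the commit), the Immediate and Pair arms above cover same-representation transmutes like these, which stay entirely in SSA values:

// Scalar -> Scalar with equal size: lowered by transmute_immediate.
pub fn f32_bits(x: f32) -> u32 {
    unsafe { std::mem::transmute(x) }
}

// ScalarPair -> ScalarPair: each half goes through transmute_immediate
// separately.
pub fn pair(x: (u32, u32)) -> (i32, i32) {
    unsafe { std::mem::transmute(x) }
}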
@@ -479,9 +436,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // path as the other integer-to-X casts.
             | mir::CastKind::PointerWithExposedProvenance => {
                 let imm = operand.immediate();
-                let operand_kind = self.value_kind(operand.layout);
-                let OperandValueKind::Immediate(from_scalar) = operand_kind else {
-                    bug!("Found {operand_kind:?} for operand {operand:?}");
+                let abi::BackendRepr::Scalar(from_scalar) = operand.layout.backend_repr else {
+                    bug!("Found non-scalar for operand {operand:?}");
                 };
                 let from_backend_ty = bx.cx().immediate_backend_type(operand.layout);
 
@@ -491,9 +447,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty));
                     return OperandRef { val, layout: cast };
                 }
-                let cast_kind = self.value_kind(cast);
-                let OperandValueKind::Immediate(to_scalar) = cast_kind else {
-                    bug!("Found {cast_kind:?} for operand {cast:?}");
+                let abi::BackendRepr::Scalar(to_scalar) = cast.layout.backend_repr else {
+                    bug!("Found non-scalar for cast {cast:?}");
                 };
 
                 self.cast_immediate(bx, imm, from_scalar, from_backend_ty, to_scalar, to_backend_ty)
@@ -993,31 +948,29 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let operand_ty = operand.ty(self.mir, self.cx.tcx());
                 let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                 let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-
-                match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
-                    // Can always load from a pointer as needed
-                    (OperandValueKind::Ref, _) => true,
-
-                    // ZST-to-ZST is the easiest thing ever
-                    (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,
-
-                    // But if only one of them is a ZST the sizes can't match
-                    (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,
-
-                    // Need to generate an `alloc` to get a pointer from an immediate
-                    (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,
+                match (operand_layout.backend_repr, cast_layout.backend_repr) {
+                    // If the input is in a place we can load immediates from there.
+                    (abi::BackendRepr::Memory { .. }, abi::BackendRepr::Scalar(_) | abi::BackendRepr::ScalarPair(_, _)) => true,
 
                     // When we have scalar immediates, we can only convert things
                     // where the sizes match, to avoid endianness questions.
-                    (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
+                    (abi::BackendRepr::Scalar(a), abi::BackendRepr::Scalar(b)) =>
                         a.size(self.cx) == b.size(self.cx),
-                    (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
+                    (abi::BackendRepr::ScalarPair(a0, a1), abi::BackendRepr::ScalarPair(b0, b1)) =>
                         a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),
 
-                    // Send mixings between scalars and pairs through the memory route
-                    // FIXME: Maybe this could use insertvalue/extractvalue instead?
-                    (OperandValueKind::Immediate(..), OperandValueKind::Pair(..)) |
-                    (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
+                    // SIMD vectors don't work like normal immediates,
+                    // so always send them through memory.
+                    (abi::BackendRepr::SimdVector { .. }, _) | (_, abi::BackendRepr::SimdVector { .. }) => false,
+
+                    // When the output will be in memory anyway, just use its place
+                    // (instead of the operand path) unless it's the trivial ZST case.
+                    (_, abi::BackendRepr::Memory { .. }) => cast_layout.is_zst(),
+
+                    // Mixing Scalars and ScalarPairs can get quite complicated when
+                    // padding and undef get involved, so leave that to the memory path.
+                    (abi::BackendRepr::Scalar(_), abi::BackendRepr::ScalarPair(_, _)) |
+                    (abi::BackendRepr::ScalarPair(_, _), abi::BackendRepr::Scalar(_)) => false,
                 }
             }
             mir::Rvalue::Ref(..) |
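An illustration of a case this match now routes through memory (hypothetical example, not in the commit): a Scalar-to-ScalarPair transmute falls into the last arm and returns false, so the rvalue is not built as an operand and codegen falls back to a stack slot.

// u64 is Scalar; (u32, u32) is ScalarPair. Mixing the two is left to the
// memory path (which halves you get depends on endianness).
pub fn split(x: u64) -> (u32, u32) {
    unsafe { std::mem::transmute(x) }
}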
@@ -1062,41 +1015,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         // (*) this is only true if the type is suitable
     }
-
-    /// Gets which variant of [`OperandValue`] is expected for a particular type.
-    fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
-        if layout.is_zst() {
-            OperandValueKind::ZeroSized
-        } else if self.cx.is_backend_immediate(layout) {
-            assert!(!self.cx.is_backend_scalar_pair(layout));
-            OperandValueKind::Immediate(match layout.backend_repr {
-                abi::BackendRepr::Scalar(s) => s,
-                abi::BackendRepr::SimdVector { element, .. } => element,
-                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
-            })
-        } else if self.cx.is_backend_scalar_pair(layout) {
-            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
-                span_bug!(
-                    self.mir.span,
-                    "Couldn't translate {:?} as backend scalar pair",
-                    layout.backend_repr,
-                );
-            };
-            OperandValueKind::Pair(s1, s2)
-        } else {
-            OperandValueKind::Ref
-        }
-    }
-}
-
-/// The variants of this match [`OperandValue`], giving details about the
-/// backend values that will be held in that other type.
-#[derive(Debug, Copy, Clone)]
-enum OperandValueKind {
-    Ref,
-    Immediate(abi::Scalar),
-    Pair(abi::Scalar, abi::Scalar),
-    ZeroSized,
-}
 }
 
 /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
@@ -1108,22 +1026,30 @@ pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     mut imm: Bx::Value,
     from_scalar: abi::Scalar,
-    from_backend_ty: Bx::Type,
     to_scalar: abi::Scalar,
-    to_backend_ty: Bx::Type,
 ) -> Bx::Value {
     assert_eq!(from_scalar.size(bx.cx()), to_scalar.size(bx.cx()));
+    let imm_ty = bx.cx().val_ty(imm);
+    assert_ne!(
+        bx.cx().type_kind(imm_ty),
+        TypeKind::Vector,
+        "Vector type {imm_ty:?} not allowed in transmute_immediate {from_scalar:?} -> {to_scalar:?}"
+    );
 
     // While optimizations will remove no-op transmutes, they might still be
     // there in debug or things that aren't no-op in MIR because they change
    // the Rust type but not the underlying layout/niche.
-    if from_scalar == to_scalar && from_backend_ty == to_backend_ty {
+    if from_scalar == to_scalar {
         return imm;
     }
 
     use abi::Primitive::*;
     imm = bx.from_immediate(imm);
 
+    let from_backend_ty = bx.cx().type_from_scalar(from_scalar);
+    debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty);
+    let to_backend_ty = bx.cx().type_from_scalar(to_scalar);
+
     // If we have a scalar, we must already know its range. Either
     //
     // 1) It's a parameter with `range` parameter metadata,
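One reason the backend types are derived only after from_immediate (a sketch, under the assumption that bool lowers to i1 as an immediate but i8 in memory, as in the LLVM backend):

// As an immediate, `b` is an i1; from_immediate widens it to i8, which is
// exactly what type_from_scalar(from_scalar) reports, so the new
// debug_assert_eq! stays satisfied.
pub fn bool_to_u8(b: bool) -> u8 {
    unsafe { std::mem::transmute(b) }
}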
@@ -1154,6 +1080,8 @@ pub(super) fn transmute_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         }
     };
 
+    debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty);
+
     // This `assume` remains important for cases like (a conceptual)
     //     transmute::<u32, NonZeroU32>(x) == 0
     // since it's never passed to something with parameter metadata (especially
tests/codegen/intrinsics/transmute-x64.rs

Lines changed: 7 additions & 4 deletions
@@ -9,17 +9,20 @@ use std::mem::transmute;
 // CHECK-LABEL: @check_sse_pair_to_avx(
 #[no_mangle]
 pub unsafe fn check_sse_pair_to_avx(x: (__m128i, __m128i)) -> __m256i {
+    // CHECK: start:
     // CHECK-NOT: alloca
-    // CHECK: %0 = load <4 x i64>, ptr %x, align 16
-    // CHECK: store <4 x i64> %0, ptr %_0, align 32
+    // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 32 %_0, ptr align 16 %x, i64 32, i1 false)
+    // CHECK-NEXT: ret void
     transmute(x)
 }
 
 // CHECK-LABEL: @check_sse_pair_from_avx(
 #[no_mangle]
 pub unsafe fn check_sse_pair_from_avx(x: __m256i) -> (__m128i, __m128i) {
+    // CHECK: start:
     // CHECK-NOT: alloca
-    // CHECK: %0 = load <4 x i64>, ptr %x, align 32
-    // CHECK: store <4 x i64> %0, ptr %_0, align 16
+    // CHECK-NEXT: %[[TEMP:.+]] = load <4 x i64>, ptr %x, align 32
+    // CHECK-NEXT: store <4 x i64> %[[TEMP]], ptr %_0, align 16
+    // CHECK-NEXT: ret void
     transmute(x)
 }

tests/codegen/simd-intrinsic/simd-intrinsic-transmute-array.rs

Lines changed: 2 additions & 4 deletions
@@ -40,8 +40,7 @@ pub fn build_array_s(x: [f32; 4]) -> S<4> {
 // CHECK-LABEL: @build_array_transmute_s
 #[no_mangle]
 pub fn build_array_transmute_s(x: [f32; 4]) -> S<4> {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
     unsafe { std::mem::transmute(x) }
 }
 
@@ -55,7 +54,6 @@ pub fn build_array_t(x: [f32; 4]) -> T {
 // CHECK-LABEL: @build_array_transmute_t
 #[no_mangle]
 pub fn build_array_transmute_t(x: [f32; 4]) -> T {
-    // CHECK: %[[VAL:.+]] = load <4 x float>, ptr %x, align [[ARRAY_ALIGN]]
-    // CHECK: store <4 x float> %[[VAL:.+]], ptr %_0, align [[VECTOR_ALIGN]]
+    // CHECK: call void @llvm.memcpy.{{.+}}({{.*}} align [[VECTOR_ALIGN]] {{.*}} align [[ARRAY_ALIGN]] {{.*}}, [[USIZE]] 16, i1 false)
     unsafe { std::mem::transmute(x) }
 }
