@@ -95,6 +95,11 @@ unsafe extern "unadjusted" {
     #[link_name = "llvm.fshl.v8i16"] fn fshlh(a: vector_unsigned_short, b: vector_unsigned_short, c: vector_unsigned_short) -> vector_unsigned_short;
     #[link_name = "llvm.fshl.v4i32"] fn fshlf(a: vector_unsigned_int, b: vector_unsigned_int, c: vector_unsigned_int) -> vector_unsigned_int;
     #[link_name = "llvm.fshl.v2i64"] fn fshlg(a: vector_unsigned_long_long, b: vector_unsigned_long_long, c: vector_unsigned_long_long) -> vector_unsigned_long_long;
+
+    #[link_name = "llvm.s390.verimb"] fn verimb(a: vector_signed_char, b: vector_signed_char, c: vector_signed_char, d: i32) -> vector_signed_char;
+    #[link_name = "llvm.s390.verimh"] fn verimh(a: vector_signed_short, b: vector_signed_short, c: vector_signed_short, d: i32) -> vector_signed_short;
+    #[link_name = "llvm.s390.verimf"] fn verimf(a: vector_signed_int, b: vector_signed_int, c: vector_signed_int, d: i32) -> vector_signed_int;
+    #[link_name = "llvm.s390.verimg"] fn verimg(a: vector_signed_long_long, b: vector_signed_long_long, c: vector_signed_long_long, d: i32) -> vector_signed_long_long;
 }

 impl_from! { i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, i64x2, u64x2, f32x4, f64x2 }
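The new `llvm.s390.verim*` bindings expose the z/Architecture VERIM instruction (vector element rotate and insert under mask): per lane, the second operand is rotated left by the immediate count in the final `i32` operand, and the mask in the third operand selects which bits of the rotated value replace bits of the first operand. Since the patch below passes the same vector as operands 1 and 2, the result is "rotate, but only update the masked bits". A minimal scalar sketch of one 32-bit lane, assuming the standard VERIM definition (the helper name `rl_mask_lane_u32` is illustrative, not part of the patch):

```rust
/// Scalar model of one VERIM lane as used by this patch: rotate `a`
/// left by `imm`, take the rotated bits where `mask` is set, and keep
/// the original bits of `a` everywhere else.
fn rl_mask_lane_u32(a: u32, mask: u32, imm: u8) -> u32 {
    // Reduce the count modulo the lane width, mirroring the
    // `IMM8 % BITS` guard in the macro further down.
    let rotated = a.rotate_left((imm % 32) as u32);
    (rotated & mask) | (a & !mask)
}

fn main() {
    // Rotate left by 8, but only update the low byte:
    // 0x1234_5678 rotl 8 = 0x3456_7812; low byte 0x12 is inserted.
    assert_eq!(rl_mask_lane_u32(0x1234_5678, 0x0000_00FF, 8), 0x1234_5612);
    println!("ok");
}
```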
@@ -948,6 +953,50 @@ mod sealed {
         vector_signed_int, verllvf_imm,
         vector_signed_long_long, verllvg_imm
     }
+
+    #[unstable(feature = "stdarch_s390x", issue = "135681")]
+    pub trait VectorRlMask<Other> {
+        unsafe fn vec_rl_mask<const IMM8: u8>(self, other: Other) -> Self;
+    }
+
+    macro_rules! impl_rl_mask {
+        ($($ty:ident, $intr:ident, $fun:ident),*) => {
+            $(
+                #[inline]
+                #[target_feature(enable = "vector")]
+                #[cfg_attr(test, assert_instr($intr, IMM8 = 6))]
+                unsafe fn $fun<const IMM8: u8>(a: $ty, b: t_u!($ty)) -> $ty {
+                    // mod by the number of bits in a's element type to prevent UB
+                    $intr(a, a, transmute(b), const { (IMM8 % <l_t_t!($ty)>::BITS as u8) as i32 })
+                }
+
+                #[unstable(feature = "stdarch_s390x", issue = "135681")]
+                impl VectorRlMask<t_u!($ty)> for $ty {
+                    #[inline]
+                    #[target_feature(enable = "vector")]
+                    unsafe fn vec_rl_mask<const IMM8: u8>(self, other: t_u!($ty)) -> Self {
+                        $fun::<IMM8>(self, other)
+                    }
+                }
+
+                #[unstable(feature = "stdarch_s390x", issue = "135681")]
+                impl VectorRlMask<t_u!($ty)> for t_u!($ty) {
+                    #[inline]
+                    #[target_feature(enable = "vector")]
+                    unsafe fn vec_rl_mask<const IMM8: u8>(self, other: t_u!($ty)) -> Self {
+                        transmute($fun::<IMM8>(transmute(self), transmute(other)))
+                    }
+                }
+            )*
+        }
+    }
+
+    impl_rl_mask! {
+        vector_signed_char, verimb, test_verimb,
+        vector_signed_short, verimh, test_verimh,
+        vector_signed_int, verimf, test_verimf,
+        vector_signed_long_long, verimg, test_verimg
+    }
 }

 /// Vector element-wise addition.
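Note the two impls the macro generates: the signed vector type calls the intrinsic directly, while the unsigned type reuses the same lane-wise bit operation via `transmute`, which is sound because rotate-and-insert does not depend on signedness. This hunk adds only the sealed trait; stdarch's convention is a thin public wrapper that forwards to it. A sketch of what such a wrapper would look like, assuming that convention (the wrapper is not part of this hunk, so its name and bounds are assumptions):

```rust
// Hypothetical public entry point, following the module's usual
// pattern of forwarding to a sealed trait method.
#[inline]
#[target_feature(enable = "vector")]
#[unstable(feature = "stdarch_s390x", issue = "135681")]
pub unsafe fn vec_rl_mask<T: sealed::VectorRlMask<U>, U, const IMM8: u8>(a: T, b: U) -> T {
    a.vec_rl_mask::<IMM8>(b)
}
```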