@@ -891,6 +891,63 @@ mod sealed {
891
891
impl_rot ! { verllvg fshlg u64 }
892
892
893
893
impl_vec_shift ! { [ VectorRl vec_rl] ( verllvb, verllvh, verllvf, verllvg) }
894
+
895
/// Emits an intrinsic shim `$fun` that rotates every lane of `a` left by the
/// same run-time count `bits`, asserting it lowers to the `$instr` machine
/// instruction under test.
macro_rules! test_rot_imm {
    ($fun:ident $instr:ident $intr:ident $ty:ident) => {
        #[inline]
        #[target_feature(enable = "vector")]
        #[cfg_attr(test, assert_instr($instr))]
        unsafe fn $fun(a: t_t_l!($ty), bits: core::ffi::c_ulong) -> t_t_l!($ty) {
            // Reduce the count modulo the lane width in bits: the funnel-shift
            // intrinsic has undefined behavior for out-of-range shift amounts.
            let count = (bits % $ty::BITS as core::ffi::c_ulong) as $ty;
            let lanes = transmute(a);
            let shifts = <t_t_s!($ty)>::splat(count);

            // fshl(x, x, s) is exactly a rotate-left of x by s.
            transmute($intr(lanes, lanes, transmute(shifts)))
        }
    };
}
910
+
911
+ test_rot_imm ! { verllvb_imm verllb fshlb u8 }
912
+ test_rot_imm ! { verllvh_imm verllh fshlh u16 }
913
+ test_rot_imm ! { verllvf_imm verllf fshlf u32 }
914
+ test_rot_imm ! { verllvg_imm verllg fshlg u64 }
915
+
916
+ #[ unstable( feature = "stdarch_s390x" , issue = "135681" ) ]
917
+ pub trait VectorRli {
918
+ unsafe fn vec_rli ( self , bits : core:: ffi:: c_ulong ) -> Self ;
919
+ }
920
+
921
/// Implements `VectorRli` for a signed lane type and its unsigned
/// counterpart, both delegating to the unsigned shim `$intr`.
macro_rules! impl_rot_imm {
    ($($ty:ident, $intr:ident),*) => {
        $(
            #[unstable(feature = "stdarch_s390x", issue = "135681")]
            impl VectorRli for $ty {
                #[inline]
                #[target_feature(enable = "vector")]
                unsafe fn vec_rli(self, bits: core::ffi::c_ulong) -> Self {
                    // Rotation is bit-pattern-only, so the signed type can
                    // round-trip through its unsigned counterpart.
                    transmute($intr(transmute(self), bits))
                }
            }

            #[unstable(feature = "stdarch_s390x", issue = "135681")]
            impl VectorRli for t_u!($ty) {
                #[inline]
                #[target_feature(enable = "vector")]
                unsafe fn vec_rli(self, bits: core::ffi::c_ulong) -> Self {
                    // Unsigned lanes match the shim's type directly.
                    $intr(self, bits)
                }
            }
        )*
    }
}
944
+
945
+ impl_rot_imm ! {
946
+ vector_signed_char, verllvb_imm,
947
+ vector_signed_short, verllvh_imm,
948
+ vector_signed_int, verllvf_imm,
949
+ vector_signed_long_long, verllvg_imm
950
+ }
894
951
}
895
952
896
953
/// Vector element-wise addition.
@@ -1339,6 +1396,18 @@ where
1339
1396
a. vec_sral ( b)
1340
1397
}
1341
1398
1399
+ /// Rotates each element of a vector left by a given number of bits. Each element of the result is obtained by rotating the corresponding element
1400
+ /// of a left by the number of bits specified by b, modulo the number of bits in the element.
1401
+ #[ inline]
1402
+ #[ target_feature( enable = "vector" ) ]
1403
+ #[ unstable( feature = "stdarch_s390x" , issue = "135681" ) ]
1404
+ pub unsafe fn vec_rli < T > ( a : T , bits : core:: ffi:: c_ulong ) -> T
1405
+ where
1406
+ T : sealed:: VectorRli ,
1407
+ {
1408
+ a. vec_rli ( bits)
1409
+ }
1410
+
1342
1411
#[ cfg( test) ]
1343
1412
mod tests {
1344
1413
use super :: * ;
0 commit comments