@@ -86,6 +86,11 @@ unsafe extern "unadjusted" {
86
86
// Whole-vector shift intrinsics: the z/Systems VSRA/VSRL/VSL operations shift
// the entire 128-bit vector (the LLVM intrinsic names mirror the machine
// instructions). All three are declared on byte vectors; callers transmute
// other element types through these signatures.
#[link_name = "llvm.s390.vsra"] fn vsra(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
#[link_name = "llvm.s390.vsrl"] fn vsrl(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
#[link_name = "llvm.s390.vsl"] fn vsl(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
89
+
90
// Generic LLVM funnel-shift-left intrinsics, one per element width.
// When both data operands are the same value, fshl(a, a, c) is an
// element-wise rotate left of `a` by `c`; LLVM's funnel-shift semantics
// take each shift count modulo the element bit width. These back the
// vec_rl implementation below (see impl_rot), where the backend lowers
// them to the VERLLV* family of instructions.
#[link_name = "llvm.fshl.v16i8"] fn fshlb(a: vector_unsigned_char, b: vector_unsigned_char, c: vector_unsigned_char) -> vector_unsigned_char;
#[link_name = "llvm.fshl.v8i16"] fn fshlh(a: vector_unsigned_short, b: vector_unsigned_short, c: vector_unsigned_short) -> vector_unsigned_short;
#[link_name = "llvm.fshl.v4i32"] fn fshlf(a: vector_unsigned_int, b: vector_unsigned_int, c: vector_unsigned_int) -> vector_unsigned_int;
#[link_name = "llvm.fshl.v2i64"] fn fshlg(a: vector_unsigned_long_long, b: vector_unsigned_long_long, c: vector_unsigned_long_long) -> vector_unsigned_long_long;
89
94
}
90
95
91
96
impl_from ! { i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, i64x2, u64x2, f32x4, f64x2 }
@@ -809,6 +814,30 @@ mod sealed {
809
814
}
810
815
811
816
impl_vec_shift_long ! { [ VectorSll vec_sll] ( vsl) }
817
+
818
/// Sealed dispatch trait behind the public [`vec_rl`] function: element-wise
/// rotate left, with one implementation per supported element width
/// (generated via `impl_vec_shift!` below).
#[unstable(feature = "stdarch_s390x", issue = "135681")]
pub trait VectorRl<Other> {
    /// Result vector type (same element width as `Self`).
    type Result;
    /// Rotate each element of `self` left by the count in the
    /// corresponding element of `b`.
    unsafe fn vec_rl(self, b: Other) -> Self::Result;
}
823
+
824
// Generates one rotate-left helper per element width.
//   $fun  - helper name, also the mnemonic assert_instr checks for
//           (verllvb/verllvh/verllvf/verllvg)
//   $intr - the llvm.fshl.* intrinsic for that width (fshlb/fshlh/fshlf/fshlg)
//   $ty   - unsigned element type, mapped to its vector type via t_t_l!
//
// Passing `a` as both data operands of the funnel shift turns
// fshl(a, a, b) into an element-wise rotate left of `a` by `b`
// (counts reduced modulo the element bit width by LLVM semantics).
// The transmutes only bridge between the t_t_l! vector type and the
// intrinsic's declared signature; they do not change the bit pattern.
macro_rules! impl_rot {
    ($fun:ident $intr:ident $ty:ident) => {
        #[inline]
        #[target_feature(enable = "vector")]
        #[cfg_attr(test, assert_instr($fun))]
        unsafe fn $fun(a: t_t_l!($ty), b: t_t_l!($ty)) -> t_t_l!($ty) {
            // fshl with identical first two operands == rotate left.
            transmute($intr(transmute(a), transmute(a), transmute(b)))
        }
    };
}
834
+
835
// One rotate helper per element width; each is expected to lower to the
// matching VERLLV* (vector element rotate left, vector shift amount)
// instruction, as asserted by the cfg_attr(test, assert_instr(...)) in
// impl_rot above.
impl_rot! { verllvb fshlb u8 }
impl_rot! { verllvh fshlh u16 }
impl_rot! { verllvf fshlf u32 }
impl_rot! { verllvg fshlg u64 }

// Wire the four helpers into VectorRl impls for all supported element
// types, reusing the same shift-trait plumbing as vec_sra/vec_srl above.
impl_vec_shift! { [VectorRl vec_rl] (verllvb, verllvh, verllvf, verllvg) }
812
841
}
813
842
814
843
/// Vector element-wise addition.
@@ -1176,6 +1205,17 @@ where
1176
1205
a. vec_sra ( b)
1177
1206
}
1178
1207
1208
/// Vector Element Rotate Left
///
/// Rotates each element of `a` left by the number of bits given in the
/// corresponding element of `b`. Rotate counts are taken modulo the
/// element size in bits (see the llvm.fshl-based implementation in
/// `sealed::VectorRl`), so out-of-range counts wrap rather than zeroing
/// the element.
#[inline]
#[target_feature(enable = "vector")]
#[unstable(feature = "stdarch_s390x", issue = "135681")]
pub unsafe fn vec_rl<T, U>(a: T, b: U) -> <T as sealed::VectorRl<U>>::Result
where
    T: sealed::VectorRl<U>,
{
    a.vec_rl(b)
}
1179
1219
/// Performs a left shift for a vector by a given number of bits. Each element of the result is obtained by shifting the corresponding
1180
1220
/// element of a left by the number of bits specified by the last 3 bits of every byte of b. The bits that are shifted out are replaced by zeros.
1181
1221
#[ inline]
@@ -1563,4 +1603,9 @@ mod tests {
1563
1603
[ -8 , -8 , -8 , -8 ] ,
1564
1604
[ 0 , 0 , 0 , 1 , 0 , 0 , 0 , 2 , 0 , 0 , 0 , 3 , 0 , 0 , 0 , 16 ] ,
1565
1605
[ -4 , -2 , -1 , -8 ] }
1606
+
1607
// Element-wise rotate left on u32 lanes. Lane 3 deliberately repeats
// lane 0's input with count 68 instead of 4: since 68 % 32 == 4, both
// lanes must produce the same result, checking that rotate counts are
// reduced modulo the element bit width.
test_vec_2! { test_vec_rl, vec_rl, u32x4,
    [0x12345678, 0x9ABCDEF0, 0x0F0F0F0F, 0x12345678],
    [4, 8, 12, 68],
    [0x23456781, 0xBCDEF09A, 0xF0F0F0F0, 0x23456781] }
1566
1611
}
0 commit comments