@@ -82,6 +82,10 @@ unsafe extern "unadjusted" {

     #[link_name = "llvm.roundeven.v4f32"] fn roundeven_v4f32(a: vector_float) -> vector_float;
     #[link_name = "llvm.roundeven.v2f64"] fn roundeven_v2f64(a: vector_double) -> vector_double;
+
+    #[link_name = "llvm.s390.vsra"] fn vsra(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
+    #[link_name = "llvm.s390.vsrl"] fn vsrl(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
+    #[link_name = "llvm.s390.vsl"] fn vsl(a: vector_signed_char, b: vector_signed_char) -> vector_signed_char;
 }

 impl_from! { i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, i64x2, u64x2, f32x4, f64x2 }
@@ -701,6 +705,43 @@ mod sealed {

     impl_vec_trait! { [VectorFloor vec_floor] simd_floor (vector_float) }
     impl_vec_trait! { [VectorFloor vec_floor] simd_floor (vector_double) }
+
+    macro_rules! impl_vec_shift_long {
+        ([$trait: ident $m: ident] ($f: ident)) => {
+            impl_vec_trait! { [$trait $m] + $f (vector_unsigned_char, vector_unsigned_char) -> vector_unsigned_char }
+            impl_vec_trait! { [$trait $m] + $f (vector_signed_char, vector_unsigned_char) -> vector_signed_char }
+            impl_vec_trait! { [$trait $m] + $f (vector_unsigned_short, vector_unsigned_char) -> vector_unsigned_short }
+            impl_vec_trait! { [$trait $m] + $f (vector_signed_short, vector_unsigned_char) -> vector_signed_short }
+            impl_vec_trait! { [$trait $m] + $f (vector_unsigned_int, vector_unsigned_char) -> vector_unsigned_int }
+            impl_vec_trait! { [$trait $m] + $f (vector_signed_int, vector_unsigned_char) -> vector_signed_int }
+            impl_vec_trait! { [$trait $m] + $f (vector_unsigned_long_long, vector_unsigned_char) -> vector_unsigned_long_long }
+            impl_vec_trait! { [$trait $m] + $f (vector_signed_long_long, vector_unsigned_char) -> vector_signed_long_long }
+        };
+    }
+
+    #[unstable(feature = "stdarch_s390x", issue = "135681")]
+    pub trait VectorSrl<Other> {
+        type Result;
+        unsafe fn vec_srl(self, b: Other) -> Self::Result;
+    }
+
+    impl_vec_shift_long! { [VectorSrl vec_srl] (vsrl) }
+
+    #[unstable(feature = "stdarch_s390x", issue = "135681")]
+    pub trait VectorSral<Other> {
+        type Result;
+        unsafe fn vec_sral(self, b: Other) -> Self::Result;
+    }
+
+    impl_vec_shift_long! { [VectorSral vec_sral] (vsra) }
+
+    #[unstable(feature = "stdarch_s390x", issue = "135681")]
+    pub trait VectorSll<Other> {
+        type Result;
+        unsafe fn vec_sll(self, b: Other) -> Self::Result;
+    }
+
+    impl_vec_shift_long! { [VectorSll vec_sll] (vsl) }
 }

 /// Vector element-wise addition.
@@ -1035,6 +1076,43 @@ where
     a.vec_rint()
 }

+/// Performs a left shift on a vector by a given number of bits. Each element of the result is obtained by shifting the corresponding
+/// element of `a` left by the number of bits specified by the last 3 bits of every byte of `b`. The bits that are shifted out are replaced by zeros.
+#[inline]
+#[target_feature(enable = "vector")]
+#[unstable(feature = "stdarch_s390x", issue = "135681")]
+pub unsafe fn vec_sll<T>(a: T, b: vector_unsigned_char) -> T
+where
+    T: sealed::VectorSll<vector_unsigned_char, Result = T>,
+{
+    a.vec_sll(b)
+}
+
+/// Performs a right shift on a vector by a given number of bits. Each element of the result is obtained by shifting the corresponding
+/// element of `a` right by the number of bits specified by the last 3 bits of every byte of `b`. The bits that are shifted out are replaced by zeros.
+#[inline]
+#[target_feature(enable = "vector")]
+#[unstable(feature = "stdarch_s390x", issue = "135681")]
+pub unsafe fn vec_srl<T>(a: T, b: vector_unsigned_char) -> T
+where
+    T: sealed::VectorSrl<vector_unsigned_char, Result = T>,
+{
+    a.vec_srl(b)
+}
+
+/// Performs an algebraic (arithmetic) right shift on a vector by a given number of bits. Each element of the result is obtained by shifting the
+/// corresponding element of `a` right by the number of bits specified by the last 3 bits of every byte of `b`. The bits that are shifted out are
+/// replaced by copies of the most significant bit of the element of `a`.
+#[inline]
+#[target_feature(enable = "vector")]
+#[unstable(feature = "stdarch_s390x", issue = "135681")]
+pub unsafe fn vec_sral<T>(a: T, b: vector_unsigned_char) -> T
+where
+    T: sealed::VectorSral<vector_unsigned_char, Result = T>,
+{
+    a.vec_sral(b)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
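For context, here is a minimal usage sketch of the three new public intrinsics. It is not part of this commit; it assumes a nightly toolchain, an s390x target with the "vector" facility, and that the functions are exported from `core::arch::s390x` as the diff above indicates. The demo function name is hypothetical.

#![feature(stdarch_s390x)]

use core::arch::s390x::*;
use core::mem::transmute;

#[target_feature(enable = "vector")]
unsafe fn shift_demo() {
    // Every byte of `b` carries the same shift count in its low 3 bits (here: 2).
    let b: vector_unsigned_char = transmute([2u8; 16]);

    // Left shift: zeros are shifted in from the right.
    let a: vector_signed_int = transmute([1i32, 2, 3, 4]);
    let left: [i32; 4] = transmute(vec_sll(a, b));
    assert_eq!(left, [4, 8, 12, 16]);

    // Logical right shift: zeros are shifted in from the left.
    let u: vector_unsigned_int = transmute([8u32; 4]);
    let logical: [u32; 4] = transmute(vec_srl(u, b));
    assert_eq!(logical, [2, 2, 2, 2]);

    // Algebraic (arithmetic) right shift: the sign bit is replicated.
    let n: vector_signed_int = transmute([-8i32; 4]);
    let algebraic: [i32; 4] = transmute(vec_sral(n, b));
    assert_eq!(algebraic, [-2, -2, -2, -2]);
}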
@@ -1076,10 +1154,13 @@ mod tests {
             test_vec_2! { $name, $fn, $ty -> $ty, [$($a),+], [$($b),+], [$($d),+] }
         };
         { $name: ident, $fn: ident, $ty: ident -> $ty_out: ident, [$($a: expr),+], [$($b: expr),+], [$($d: expr),+] } => {
+            test_vec_2! { $name, $fn, $ty, $ty -> $ty_out, [$($a),+], [$($b),+], [$($d),+] }
+        };
+        { $name: ident, $fn: ident, $ty1: ident, $ty2: ident -> $ty_out: ident, [$($a: expr),+], [$($b: expr),+], [$($d: expr),+] } => {
             #[simd_test(enable = "vector")]
             unsafe fn $name() {
-                let a: s_t_l!($ty) = transmute($ty::new($($a),+));
-                let b: s_t_l!($ty) = transmute($ty::new($($b),+));
+                let a: s_t_l!($ty1) = transmute($ty1::new($($a),+));
+                let b: s_t_l!($ty2) = transmute($ty2::new($($b),+));

                 let d = $ty_out::new($($d),+);
                 let r: $ty_out = transmute($fn(a, b));
@@ -1362,4 +1443,24 @@ mod tests {
     [0.6, 0.9],
     [1.0, 1.0]
     }
+
+    test_vec_2! { test_vec_sll, vec_sll, i32x4, u8x16 -> i32x4,
+    [1, 1, 1, 1],
+    [0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 8],
+    [1 << 2, 1 << 3, 1 << 4, 1] }
+
+    test_vec_2! { test_vec_srl, vec_srl, i32x4, u8x16 -> i32x4,
+    [0b1000, 0b1000, 0b1000, 0b1000],
+    [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 16],
+    [4, 2, 1, 8] }
+
+    test_vec_2! { test_vec_sral_pos, vec_sral, u32x4, u8x16 -> i32x4,
+    [0b1000, 0b1000, 0b1000, 0b1000],
+    [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 16],
+    [4, 2, 1, 8] }
+
+    test_vec_2! { test_vec_sral_neg, vec_sral, i32x4, u8x16 -> i32x4,
+    [-8, -8, -8, -8],
+    [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 16],
+    [-4, -2, -1, -8] }
 }