@@ -606,6 +606,57 @@ mod sealed {
     impl_vec_xl! { vec_xl_u32 lxvd2x / lxv u32 }
     impl_vec_xl! { vec_xl_f32 lxvd2x / lxv f32 }
 
+    #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+    pub trait VectorXst {
+        type Out;
+        unsafe fn vec_xst(self, a: isize, p: Self::Out);
+    }
+
+    macro_rules! impl_vec_xst {
+        ($fun:ident $notpwr9:ident / $pwr9:ident $ty:ident) => {
+            #[inline]
+            #[target_feature(enable = "altivec")]
+            #[cfg_attr(
+                all(test, not(target_feature = "power9-altivec")),
+                assert_instr($notpwr9)
+            )]
+            #[cfg_attr(all(test, target_feature = "power9-altivec"), assert_instr($pwr9))]
+            pub unsafe fn $fun(s: t_t_l!($ty), a: isize, b: *mut $ty) {
+                let addr = (b as *mut u8).offset(a);
+
+                // Workaround ptr::copy_nonoverlapping not being inlined
+                extern "rust-intrinsic" {
+                    #[rustc_nounwind]
+                    pub fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+                }
+
+                copy_nonoverlapping(
+                    &s as *const _ as *const u8,
+                    addr,
+                    mem::size_of::<t_t_l!($ty)>(),
+                );
+            }
+
+            #[unstable(feature = "stdarch_powerpc", issue = "111145")]
+            impl VectorXst for t_t_l!($ty) {
+                type Out = *mut $ty;
+                #[inline]
+                #[target_feature(enable = "altivec")]
+                unsafe fn vec_xst(self, a: isize, b: Self::Out) {
+                    $fun(self, a, b)
+                }
+            }
+        };
+    }
+
+    impl_vec_xst! { vec_xst_i8 stxvd2x / stxv i8 }
+    impl_vec_xst! { vec_xst_u8 stxvd2x / stxv u8 }
+    impl_vec_xst! { vec_xst_i16 stxvd2x / stxv i16 }
+    impl_vec_xst! { vec_xst_u16 stxvd2x / stxv u16 }
+    impl_vec_xst! { vec_xst_i32 stxvd2x / stxv i32 }
+    impl_vec_xst! { vec_xst_u32 stxvd2x / stxv u32 }
+    impl_vec_xst! { vec_xst_f32 stxvd2x / stxv f32 }
+
     test_impl! { vec_floor(a: vector_float) -> vector_float [vfloor, vrfim / xvrspim] }
 
     test_impl! { vec_vexptefp(a: vector_float) -> vector_float [vexptefp, vexptefp] }
@@ -2692,6 +2743,17 @@ where
     p.vec_xl(off)
 }
 
+/// VSX Unaligned Store
+#[inline]
+#[target_feature(enable = "altivec")]
+#[unstable(feature = "stdarch_powerpc", issue = "111145")]
+pub unsafe fn vec_xst<T>(v: T, off: isize, p: <T as sealed::VectorXst>::Out)
+where
+    T: sealed::VectorXst,
+{
+    v.vec_xst(off, p)
+}
+
 /// Vector Base-2 Logarithm Estimate
 #[inline]
 #[target_feature(enable = "altivec")]
@@ -3579,6 +3641,21 @@ mod tests {
         }
     }
 
+    #[simd_test(enable = "altivec")]
+    unsafe fn test_vec_xst() {
+        let v: vector_unsigned_char = transmute(u8x16::new(
+            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+        ));
+
+        for off in 0..16 {
+            let mut buf = [0u8; 32];
+            vec_xst(v, 0, (buf.as_mut_ptr() as *mut u8).offset(off));
+            for i in 0..16 {
+                assert_eq!(i as u8, buf[off as usize..][i]);
+            }
+        }
+    }
+
     #[simd_test(enable = "altivec")]
     unsafe fn test_vec_ldl() {
         let pat = [
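For context, here is a minimal usage sketch of the `vec_xst` intrinsic this diff introduces, paired with the existing `vec_xl` unaligned load. This is an assumption-laden illustration, not part of the patch: it presumes a nightly toolchain with the unstable `stdarch_powerpc` feature, a `powerpc64` target where AltiVec/VSX is actually available, and the helper name `copy_16_bytes` is invented for the example.

```rust
// Sketch only: requires nightly Rust and a powerpc64 target; `vec_xl`/`vec_xst`
// are gated behind the unstable `stdarch_powerpc` feature (issue #111145).
#![feature(stdarch_powerpc)]

#[cfg(target_arch = "powerpc64")]
use core::arch::powerpc64::*;

// Copy 16 bytes between possibly unaligned addresses with a VSX unaligned
// load followed by the new VSX unaligned store.
#[cfg(target_arch = "powerpc64")]
#[target_feature(enable = "altivec")]
unsafe fn copy_16_bytes(src: *const u8, dst: *mut u8) {
    // `vec_xl(off, p)` loads 16 bytes starting at `p` offset by `off` bytes.
    let v: vector_unsigned_char = vec_xl(0, src);
    // `vec_xst(v, off, p)` stores the 16 bytes of `v` at `p` offset by `off` bytes.
    vec_xst(v, 0, dst);
}

fn main() {
    #[cfg(target_arch = "powerpc64")]
    unsafe {
        let src = [7u8; 16];
        let mut dst = [0u8; 16];
        copy_16_bytes(src.as_ptr(), dst.as_mut_ptr());
        assert_eq!(src, dst);
    }
}
```

Per the `assert_instr` annotations in the macro, the store is expected to lower to `stxvd2x` on pre-POWER9 VSX targets and to `stxv` when `power9-altivec` is enabled.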