@@ -138,31 +138,22 @@ macro_rules! simd_impl {
138
138
/// Requires nightly Rust and the [`simd_support`] feature
139
139
///
140
140
/// [`simd_support`]: https://github.com/rust-random/rand#crate-features
141
- impl Distribution <$ty> for Standard {
141
+ impl <const LANES : usize > Distribution <Simd <$ty, LANES >> for Standard
142
+ where
143
+ LaneCount <LANES >: SupportedLaneCount ,
144
+ {
142
145
#[ inline]
143
- fn sample<R : Rng + ?Sized >( & self , rng: & mut R ) -> $ty {
144
- // TODO: impl this generically once const generics are robust enough
145
- let mut vec: Simd <u8 , { mem:: size_of:: <$ty>( ) } > = Default :: default ( ) ;
146
- rng. fill_bytes( vec. as_mut_array( ) ) ;
147
- // NOTE: replace with `to_le` if added to core::simd
148
- #[ cfg( not( target_endian = "little" ) ) ]
149
- {
150
- vec = vec. reverse( ) ;
151
- }
152
- // SAFETY: we know u8xN and $ty have the same size
153
- unsafe { mem:: transmute_copy( & vec) }
146
+ fn sample<R : Rng + ?Sized >( & self , rng: & mut R ) -> Simd <$ty, LANES > {
147
+ let mut vec = Simd :: default ( ) ;
148
+ rng. fill( vec. as_mut_array( ) . as_mut_slice( ) ) ;
149
+ vec
154
150
}
155
151
}
156
152
) +} ;
157
153
}
158
154
// Blanket SIMD integer support: the macro emits, for each element type listed
// here, `impl<const LANES: usize> Distribution<Simd<$ty, LANES>> for Standard`
// (lane counts are handled generically via `LaneCount<LANES>: SupportedLaneCount`,
// so per-width aliases like `u8x4`/`i32x16` no longer need to be enumerated).
#[cfg(feature = "simd_support")]
simd_impl!(u8, i8, u16, i16, u32, i32, u64, i64, usize, isize);
166
157
167
158
#[ cfg( any( target_arch = "x86" , target_arch = "x86_64" ) ) ]
168
159
intrinsic_impl ! ( __m128i, __m256i) ;
0 commit comments