@@ -3147,42 +3147,42 @@ impl Simd for V4_256 {

    #[inline(always)]
    fn f32s_reduce_sum(self, a: Self::f32s) -> f32 {
-        unsafe { (**self).f32s_reduce_sum(transmute(a)) }
+        (**self).f32s_reduce_sum(a)
    }

    #[inline(always)]
    fn f32s_reduce_product(self, a: Self::f32s) -> f32 {
-        unsafe { (**self).f32s_reduce_product(transmute(a)) }
+        (**self).f32s_reduce_product(a)
    }

    #[inline(always)]
    fn f32s_reduce_min(self, a: Self::f32s) -> f32 {
-        unsafe { (**self).f32s_reduce_min(transmute(a)) }
+        (**self).f32s_reduce_min(a)
    }

    #[inline(always)]
    fn f32s_reduce_max(self, a: Self::f32s) -> f32 {
-        unsafe { (**self).f32s_reduce_max(transmute(a)) }
+        (**self).f32s_reduce_max(a)
    }

    #[inline(always)]
    fn f64s_reduce_sum(self, a: Self::f64s) -> f64 {
-        unsafe { (**self).f64s_reduce_sum(transmute(a)) }
+        (**self).f64s_reduce_sum(a)
    }

    #[inline(always)]
    fn f64s_reduce_product(self, a: Self::f64s) -> f64 {
-        unsafe { (**self).f64s_reduce_product(transmute(a)) }
+        (**self).f64s_reduce_product(a)
    }

    #[inline(always)]
    fn f64s_reduce_min(self, a: Self::f64s) -> f64 {
-        unsafe { (**self).f64s_reduce_min(transmute(a)) }
+        (**self).f64s_reduce_min(a)
    }

    #[inline(always)]
    fn f64s_reduce_max(self, a: Self::f64s) -> f64 {
-        unsafe { (**self).f64s_reduce_max(transmute(a)) }
+        (**self).f64s_reduce_max(a)
    }

    type c32s = f32x8;
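
The hunk above drops the unsafe transmute wrappers from the scalar reductions: the V4_256 token derefs to the underlying SIMD implementation, and once the associated vector types on both sides are the same concrete type, the call can be forwarded as-is. Below is a minimal, self-contained sketch of that deref-and-forward pattern; Wrapper, Inner, and the toy f32x8 are illustrative stand-ins, not the real types from this codebase.

// Minimal sketch of the deref-and-forward pattern, assuming the wrapper's
// vector type is literally the inner implementation's vector type, so no
// transmute (and no unsafe) is needed. All names here are illustrative.
#![allow(non_camel_case_types)]

use core::ops::Deref;

#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(transparent)]
struct f32x8([f32; 8]);

#[derive(Copy, Clone)]
struct Inner; // stands in for the wrapped lower-level SIMD token

impl Inner {
    #[inline(always)]
    fn f32s_reduce_sum(self, a: f32x8) -> f32 {
        a.0.iter().sum()
    }
}

#[derive(Copy, Clone)]
struct Wrapper(Inner); // stands in for a token like V4_256

impl Deref for Wrapper {
    type Target = Inner;
    #[inline(always)]
    fn deref(&self) -> &Inner {
        &self.0
    }
}

impl Wrapper {
    #[inline(always)]
    fn f32s_reduce_sum(self, a: f32x8) -> f32 {
        // Same shape as the diff: forward to the inner implementation
        // through Deref, passing the vector value through unchanged.
        (*self).f32s_reduce_sum(a)
    }
}

fn main() {
    let simd = Wrapper(Inner);
    assert_eq!(simd.f32s_reduce_sum(f32x8([1.0; 8])), 8.0);
}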
@@ -3272,47 +3272,47 @@ impl Simd for V4_256 {
    #[inline(always)]
    fn u32s_partial_load(self, slice: &[u32]) -> Self::u32s {
        unsafe {
-            let mask = transmute(V4_256_U32_MASKS[slice.len().min(16)]);
+            let mask = V4_256_U32_MASKS[slice.len().min(16)];
            transmute(_mm256_maskz_loadu_epi32(mask, slice.as_ptr() as _))
        }
    }

    #[inline(always)]
    fn u32s_partial_store(self, slice: &mut [u32], values: Self::u32s) {
        unsafe {
-            let mask = transmute(V4_256_U32_MASKS[slice.len().min(16)]);
+            let mask = V4_256_U32_MASKS[slice.len().min(16)];
            _mm256_mask_storeu_epi32(slice.as_mut_ptr() as _, mask, transmute(values));
        }
    }

    #[inline(always)]
    fn u64s_partial_load(self, slice: &[u64]) -> Self::u64s {
        unsafe {
-            let mask = transmute(V4_256_U32_MASKS[(2 * slice.len()).min(16)]);
+            let mask = V4_256_U32_MASKS[(2 * slice.len()).min(16)];
            transmute(_mm256_maskz_loadu_epi32(mask, slice.as_ptr() as _))
        }
    }

    #[inline(always)]
    fn u64s_partial_store(self, slice: &mut [u64], values: Self::u64s) {
        unsafe {
-            let mask = transmute(V4_256_U32_MASKS[(2 * slice.len()).min(16)]);
+            let mask = V4_256_U32_MASKS[(2 * slice.len()).min(16)];
            _mm256_mask_storeu_epi32(slice.as_mut_ptr() as _, mask, transmute(values));
        }
    }

    #[inline(always)]
    fn c64s_partial_load(self, slice: &[c64]) -> Self::c64s {
        unsafe {
-            let mask = transmute(V4_256_U32_MASKS[(4 * slice.len()).min(16)]);
+            let mask = V4_256_U32_MASKS[(4 * slice.len()).min(16)];
            transmute(_mm256_maskz_loadu_epi32(mask, slice.as_ptr() as _))
        }
    }

    #[inline(always)]
    fn c64s_partial_store(self, slice: &mut [c64], values: Self::c64s) {
        unsafe {
-            let mask = transmute(V4_256_U32_MASKS[(4 * slice.len()).min(16)]);
+            let mask = V4_256_U32_MASKS[(4 * slice.len()).min(16)];
            _mm256_mask_storeu_epi32(slice.as_mut_ptr() as _, mask, transmute(values));
        }
    }
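
These partial loads and stores use AVX-512VL masked moves so a slice tail can be handled without a scalar loop: the slice length is turned into a per-lane bitmask, _mm256_maskz_loadu_epi32 zeroes the lanes past the end instead of reading them, and _mm256_mask_storeu_epi32 writes only the live lanes. The diff looks the mask up in a precomputed table (V4_256_U32_MASKS, whose definition is not shown here); the hedged sketch below computes an equivalent mask inline for the 8-lane u32 case, with an illustrative function name that is not part of the code being diffed.

// Hedged sketch of a masked partial load of up to 8 u32s into a 256-bit
// register, computing the lane mask from the slice length instead of
// indexing a precomputed table like V4_256_U32_MASKS.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn partial_load_u32x8(slice: &[u32]) -> core::arch::x86_64::__m256i {
    use core::arch::x86_64::_mm256_maskz_loadu_epi32;
    // One mask bit per 32-bit lane; lanes at or past `slice.len()` are zeroed
    // and their memory is never touched, so short slices are safe to load.
    let mask: u8 = if slice.len() >= 8 {
        0xff
    } else {
        (1u8 << slice.len()) - 1
    };
    unsafe { _mm256_maskz_loadu_epi32(mask, slice.as_ptr() as _) }
}

The u64 and c64 variants reuse the same 32-bit mask table by scaling the length, since each u64 spans two 32-bit lanes and each c64 spans four; that is why their indices are 2 * slice.len() and 4 * slice.len() in the hunk above.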
@@ -3321,7 +3321,7 @@ impl Simd for V4_256 {
    fn u32s_partial_load_last(self, slice: &[u32]) -> Self::u32s {
        unsafe {
            let len = slice.len();
-            let mask = transmute(V4_256_U32_LAST_MASKS[slice.len().min(16)]);
+            let mask = V4_256_U32_LAST_MASKS[slice.len().min(16)];
            transmute(_mm256_maskz_loadu_epi32(
                mask,
                slice.as_ptr().add(len).wrapping_sub(16) as _,
@@ -3333,7 +3333,7 @@ impl Simd for V4_256 {
    fn u32s_partial_store_last(self, slice: &mut [u32], values: Self::u32s) {
        unsafe {
            let len = slice.len();
-            let mask = transmute(V4_256_U32_LAST_MASKS[slice.len().min(16)]);
+            let mask = V4_256_U32_LAST_MASKS[slice.len().min(16)];
            _mm256_mask_storeu_epi32(
                slice.as_mut_ptr().add(len).wrapping_sub(16) as _,
                mask,
@@ -3346,7 +3346,7 @@ impl Simd for V4_256 {
    fn u64s_partial_load_last(self, slice: &[u64]) -> Self::u64s {
        unsafe {
            let len = slice.len();
-            let mask = transmute(V4_256_U32_LAST_MASKS[(2 * slice.len()).min(16)]);
+            let mask = V4_256_U32_LAST_MASKS[(2 * slice.len()).min(16)];
            transmute(_mm256_maskz_loadu_epi32(
                mask,
                slice.as_ptr().add(len).wrapping_sub(8) as _,
@@ -3358,7 +3358,7 @@ impl Simd for V4_256 {
    fn u64s_partial_store_last(self, slice: &mut [u64], values: Self::u64s) {
        unsafe {
            let len = slice.len();
-            let mask = transmute(V4_256_U32_LAST_MASKS[(2 * slice.len()).min(16)]);
+            let mask = V4_256_U32_LAST_MASKS[(2 * slice.len()).min(16)];
            _mm256_mask_storeu_epi32(
                slice.as_mut_ptr().add(len).wrapping_sub(8) as _,
                mask,
@@ -3371,7 +3371,7 @@ impl Simd for V4_256 {
    fn c64s_partial_load_last(self, slice: &[c64]) -> Self::c64s {
        unsafe {
            let len = slice.len();
-            let mask = transmute(V4_256_U32_LAST_MASKS[(4 * slice.len()).min(16)]);
+            let mask = V4_256_U32_LAST_MASKS[(4 * slice.len()).min(16)];
            transmute(_mm256_maskz_loadu_epi32(
                mask,
                slice.as_ptr().add(len).wrapping_sub(4) as _,
@@ -3383,7 +3383,7 @@ impl Simd for V4_256 {
    fn c64s_partial_store_last(self, slice: &mut [c64], values: Self::c64s) {
        unsafe {
            let len = slice.len();
-            let mask = transmute(V4_256_U32_LAST_MASKS[(4 * slice.len()).min(16)]);
+            let mask = V4_256_U32_LAST_MASKS[(4 * slice.len()).min(16)];
            _mm256_mask_storeu_epi32(
                slice.as_mut_ptr().add(len).wrapping_sub(4) as _,
                mask,
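
The partial_load_last / partial_store_last hunks apply the same masked-move idea to the end of a slice: the pointer is stepped back from slice.as_ptr().add(len) by a fixed element count and a mask from V4_256_U32_LAST_MASKS (also not shown here) selects only the trailing lanes, so the final elements land in the high lanes of the vector. Below is a hedged, self-contained sketch of the load side for a single 8-lane u32 window; it computes the trailing mask inline and steps back exactly one register width, which is a simplification of the table lookups and offsets used above, and the function name is illustrative.

// Hedged sketch: load the last `slice.len().min(8)` u32s into the high lanes
// of a 256-bit register. Masked-off leading lanes are never dereferenced, so
// pointing before the start of a short slice is safe for the masked load;
// wrapping pointer arithmetic avoids out-of-bounds offset computation.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f,avx512vl")]
unsafe fn partial_load_last_u32x8(slice: &[u32]) -> core::arch::x86_64::__m256i {
    use core::arch::x86_64::_mm256_maskz_loadu_epi32;
    let len = slice.len();
    // Keep only the top `len` of the 8 lanes; e.g. len == 3 gives 0b1110_0000.
    let mask: u8 = if len >= 8 { 0xff } else { !(0xffu8 >> len) };
    let ptr = slice.as_ptr().wrapping_add(len).wrapping_sub(8);
    unsafe { _mm256_maskz_loadu_epi32(mask, ptr as _) }
}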
@@ -3439,7 +3439,7 @@ impl Simd for V4_256 {

    #[inline(always)]
    fn c32s_reduce_sum(self, a: Self::c32s) -> c32 {
-        unsafe { (**self).c32s_reduce_sum(transmute(a)) }
+        (**self).c32s_reduce_sum(a)
    }

    #[inline(always)]
@@ -3449,7 +3449,7 @@ impl Simd for V4_256 {

    #[inline(always)]
    fn c64s_reduce_sum(self, a: Self::c64s) -> c64 {
-        unsafe { (**self).c64s_reduce_sum(transmute(a)) }
+        (**self).c64s_reduce_sum(a)
    }

    #[inline(always)]