@@ -253,7 +253,7 @@ impl BytesMut {
 
             let ptr = self.ptr.as_ptr();
             let len = self.len;
-            let data = AtomicPtr::new(self.data as _);
+            let data = AtomicPtr::new(self.data.cast());
             mem::forget(self);
             unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
         }
@@ -613,7 +613,7 @@ impl BytesMut {
         }
 
         debug_assert_eq!(kind, KIND_ARC);
-        let shared: *mut Shared = self.data as _;
+        let shared: *mut Shared = self.data;
 
         // Reserving involves abandoning the currently shared buffer and
         // allocating a new vector with the requested capacity.
@@ -692,7 +692,7 @@ impl BytesMut {
 
         // Update self
         let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
-        self.data = data as _;
+        self.data = invalid_ptr(data);
         self.ptr = vptr(v.as_mut_ptr());
         self.len = v.len();
         self.cap = v.capacity();
@@ -723,7 +723,7 @@ impl BytesMut {
             // Reserved above
             debug_assert!(dst.len() >= cnt);
 
-            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
+            ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr(), cnt);
         }
 
         unsafe {
@@ -788,7 +788,7 @@ impl BytesMut {
             ptr,
             len,
             cap,
-            data: data as *mut _,
+            data: invalid_ptr(data),
         }
     }
 
@@ -909,7 +909,7 @@ impl BytesMut {
         // always succeed.
         debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
 
-        self.data = shared as _;
+        self.data = shared;
     }
 
     /// Makes an exact shallow clone of `self`.
@@ -942,7 +942,7 @@ impl BytesMut {
         debug_assert_eq!(self.kind(), KIND_VEC);
         debug_assert!(pos <= MAX_VEC_POS);
 
-        self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _;
+        self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
     }
 
     #[inline]
@@ -968,7 +968,7 @@ impl Drop for BytesMut {
                 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
             }
         } else if kind == KIND_ARC {
-            unsafe { release_shared(self.data as _) };
+            unsafe { release_shared(self.data) };
         }
     }
 }
@@ -1549,6 +1549,18 @@ fn vptr(ptr: *mut u8) -> NonNull<u8> {
     }
 }
 
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but this fails on miri when strict
+/// provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+    debug_assert_eq!(ptr as usize, addr);
+    ptr.cast::<T>()
+}
+
 unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
     let ptr = ptr.offset(-(off as isize));
     len += off;
@@ -1568,7 +1580,7 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By
     let shared = data.load(Ordering::Relaxed) as *mut Shared;
     increment_shared(shared);
 
-    let data = AtomicPtr::new(shared as _);
+    let data = AtomicPtr::new(shared as *mut ());
     Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
 }
 
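As a side note, here is a minimal standalone sketch of the pattern the new `invalid_ptr` helper enables: rather than casting an integer tag directly to a pointer (`addr as *mut T`), it offsets the null pointer with `wrapping_add`, which round-trips the same address while remaining acceptable when miri's strict provenance checking is enabled. The `KIND_VEC`/`POS_SHIFT` constants and the packing scheme below are illustrative stand-ins for demonstration, not the crate's actual layout.

```rust
use core::ptr;

/// The helper from the patch: build a pointer that only carries an address,
/// by offsetting the null pointer instead of casting an integer.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let p = ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(p as usize, addr);
    p.cast::<T>()
}

// Illustrative stand-ins for the crate's real constants and bit layout.
const KIND_VEC: usize = 0b1;
const POS_SHIFT: usize = 1;

fn main() {
    // Pack a small integer payload plus a tag bit into a pointer-sized field,
    // the way `BytesMut` keeps kind/position bits in its `data` field.
    let pos = 42usize;
    let data: *mut () = invalid_ptr((pos << POS_SHIFT) | KIND_VEC);

    // Reading the bits back only inspects the address; the pointer is never
    // dereferenced.
    let bits = data as usize;
    assert_eq!(bits & KIND_VEC, KIND_VEC);
    assert_eq!(bits >> POS_SHIFT, pos);
    println!("kind bit set, pos = {}", bits >> POS_SHIFT);
}
```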