@@ -1,18 +1,20 @@
 //! The virtual memory representation of the MIR interpreter.
 
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
 };
 
 use crate::ty::layout::{Align, Size};
 
-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
 // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
 // `src/librustc_mir/interpret/snapshot.rs`.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(bytes.len() as u64);
+        let size = Size::from_bytes(u64::try_from(bytes.len()).unwrap());
         Self {
             bytes,
             relocations: Relocations::new(),
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
     }
 
     pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
         Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; usize::try_from(size.bytes()).unwrap()],
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, false),
             size,
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        usize::try_from(self.size.bytes()).unwrap()
     }
 
     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -182,21 +183,16 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Returns the range of this allocation that was meant.
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
-        let end = offset + size; // This does overflow checking.
-        assert_eq!(
-            end.bytes() as usize as u64,
-            end.bytes(),
-            "cannot handle this access on this host architecture"
-        );
-        let end = end.bytes() as usize;
+        let end = Size::add(offset, size); // This does overflow checking.
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
             "Out-of-bounds access at offset {}, size {} in allocation of size {}",
             offset.bytes(),
             size.bytes(),
             self.len()
         );
-        (offset.bytes() as usize)..end
+        usize::try_from(offset.bytes()).unwrap()..end
     }
 
     /// The last argument controls whether we error out when there are undefined
@@ -294,11 +290,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, &[u8]> {
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        let offset = ptr.offset.bytes() as usize;
+        let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::from_bytes((size + 1) as u64);
+                let size_with_null =
+                    Size::from_bytes(u64::try_from(size.checked_add(1).unwrap()).unwrap());
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -343,7 +339,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let (lower, upper) = src.size_hint();
         let len = upper.expect("can only write bounded iterators");
         assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(u64::try_from(len).unwrap()))?;
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -386,7 +382,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         } else {
             match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+                    let ptr = Pointer::new_with_tag(
+                        alloc_id,
+                        Size::from_bytes(u64::try_from(bits).unwrap()),
+                        tag,
+                    );
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                 }
                 None => {}
@@ -433,7 +433,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         };
 
         let bytes = match val.to_bits_or_ptr(type_size, cx) {
-            Err(val) => val.offset.bytes() as u128,
+            Err(val) => u128::from(val.offset.bytes()),
             Ok(data) => data,
         };
 
@@ -479,7 +479,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-        let end = ptr.offset + size; // This does overflow checking.
+        let end = Size::add(ptr.offset, size); // This does overflow checking.
         self.relocations.range(Size::from_bytes(start)..end)
     }
 
@@ -524,7 +524,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = start + size;
+        let end = Size::add(start, size);
 
         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
@@ -563,15 +563,15 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, ptr.offset + size)
+            .is_range_defined(ptr.offset, Size::add(ptr.offset, size))
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }
 
     pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
         if size.bytes() == 0 {
             return;
         }
-        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
+        self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
     }
 }
 
@@ -616,7 +616,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
 
         for i in 1..size.bytes() {
             // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
-            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
+            if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
                 cur_len += 1;
             } else {
                 ranges.push(cur_len);
@@ -643,7 +643,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                dest.offset + size * repeat,
+                Size::add(dest.offset, Size::mul(size, repeat)),
                 defined.initial,
             );
             return;
@@ -721,10 +721,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = dest.offset + (i * size);
+                let dest_offset = Size::add(dest.offset, Size::mul(size, i));
                 (
                     // shift offsets from source allocation to destination allocation
-                    offset + dest_offset - src.offset,
+                    Size::sub(Size::add(offset, dest_offset), src.offset),
                     reloc,
                 )
             }));
@@ -861,18 +861,18 @@ impl UndefMask {
         if amount.bytes() == 0 {
             return;
         }
-        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
         if amount.bytes() > unused_trailing_bits {
             let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
                 // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(additional_blocks as usize),
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
             );
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
+        self.set_range_inbounds(start, Size::add(start, amount), new_state);
     }
 }
 
@@ -881,7 +881,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
     let bits = bits.bytes();
     let a = bits / UndefMask::BLOCK_SIZE;
     let b = bits % UndefMask::BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
 }
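
The common thread of this diff is replacing silent `as` casts and unchecked `+`/`*` with conversions and arithmetic that panic loudly on loss or overflow. A minimal standalone sketch of the pattern, using plain integers rather than the interpreter's `Size` type (the variable names here are illustrative only, not from the source):

```rust
use std::convert::TryFrom;

fn main() {
    let len: usize = 300;

    // An `as` cast silently truncates when the value does not fit the target
    // type: 300 narrowed to u8 wraps to 44 with no indication of data loss.
    let lossy = len as u8;
    assert_eq!(lossy, 44);

    // `try_from` makes the narrowing explicit: it returns an error on loss,
    // and `.unwrap()` turns that into a panic instead of wrong data.
    assert!(u8::try_from(len).is_err());

    // Widening `usize -> u64` can also go through `try_from` for uniformity;
    // it only fails on a host where `usize` is wider than 64 bits.
    let as_u64 = u64::try_from(len).unwrap();

    // Checked arithmetic follows the same philosophy: overflow is a bug, so
    // panic rather than wrap around.
    let end = as_u64.checked_add(1).unwrap();
    assert_eq!(end, 301);
}
```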