Skip to content

Commit 465f5d8

Browse files
authored
Rollup merge of #100822 - WaffleLapkin:no_offset_question_mark, r=scottmcm
Replace most uses of `pointer::offset` with `add` and `sub`. As the PR title says, it replaces `pointer::offset` in the compiler and standard library with `pointer::add` and `pointer::sub`. This generally makes code cleaner, easier to grasp, and removes (or, well, hides) integer casts. This is generally trivially correct: `.offset(-constant)` is just `.sub(constant)`, `.offset(usized as isize)` is just `.add(usized)`, etc. However, in some cases we need to be careful with the signs of things. r? `@scottmcm` _split off from #100746_
2 parents 959be73 + 17d153c commit 465f5d8

File tree

18 files changed

+53
-53
lines changed

18 files changed

+53
-53
lines changed

alloc/src/alloc/tests.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ fn allocate_zeroed() {
1515
let end = i.add(layout.size());
1616
while i < end {
1717
assert_eq!(*i, 0);
18-
i = i.offset(1);
18+
i = i.add(1);
1919
}
2020
Global.deallocate(ptr.as_non_null_ptr(), layout);
2121
}

alloc/src/collections/vec_deque/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
24472447
let mut right_offset = 0;
24482448
for i in left_edge..right_edge {
24492449
right_offset = (i - left_edge) % (cap - right_edge);
2450-
let src: isize = (right_edge + right_offset) as isize;
2451-
ptr::swap(buf.add(i), buf.offset(src));
2450+
let src = right_edge + right_offset;
2451+
ptr::swap(buf.add(i), buf.add(src));
24522452
}
24532453
let n_ops = right_edge - left_edge;
24542454
left_edge += n_ops;

alloc/src/slice.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1024,7 +1024,7 @@ where
10241024
// Consume the greater side.
10251025
// If equal, prefer the right run to maintain stability.
10261026
unsafe {
1027-
let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
1027+
let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
10281028
decrement_and_get(left)
10291029
} else {
10301030
decrement_and_get(right)
@@ -1038,12 +1038,12 @@ where
10381038

10391039
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
10401040
let old = *ptr;
1041-
*ptr = unsafe { ptr.offset(1) };
1041+
*ptr = unsafe { ptr.add(1) };
10421042
old
10431043
}
10441044

10451045
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
1046-
*ptr = unsafe { ptr.offset(-1) };
1046+
*ptr = unsafe { ptr.sub(1) };
10471047
*ptr
10481048
}
10491049

alloc/src/vec/in_place_collect.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -267,7 +267,7 @@ where
267267
// one slot in the underlying storage will have been freed up and we can immediately
268268
// write back the result.
269269
unsafe {
270-
let dst = dst_buf.offset(i as isize);
270+
let dst = dst_buf.add(i);
271271
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
272272
ptr::write(dst, self.__iterator_get_unchecked(i));
273273
// Since this executes user code which can panic we have to bump the pointer

alloc/src/vec/into_iter.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
160160
Some(unsafe { mem::zeroed() })
161161
} else {
162162
let old = self.ptr;
163-
self.ptr = unsafe { self.ptr.offset(1) };
163+
self.ptr = unsafe { self.ptr.add(1) };
164164

165165
Some(unsafe { ptr::read(old) })
166166
}
@@ -272,7 +272,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
272272
// Make up a value of this ZST.
273273
Some(unsafe { mem::zeroed() })
274274
} else {
275-
self.end = unsafe { self.end.offset(-1) };
275+
self.end = unsafe { self.end.sub(1) };
276276

277277
Some(unsafe { ptr::read(self.end) })
278278
}
@@ -288,7 +288,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
288288
}
289289
} else {
290290
// SAFETY: same as for advance_by()
291-
self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
291+
self.end = unsafe { self.end.sub(step_size) };
292292
}
293293
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
294294
// SAFETY: same as for advance_by()

alloc/src/vec/mod.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1393,7 +1393,7 @@ impl<T, A: Allocator> Vec<T, A> {
13931393
if index < len {
13941394
// Shift everything over to make space. (Duplicating the
13951395
// `index`th element into two consecutive places.)
1396-
ptr::copy(p, p.offset(1), len - index);
1396+
ptr::copy(p, p.add(1), len - index);
13971397
} else if index == len {
13981398
// No elements need shifting.
13991399
} else {
@@ -1455,7 +1455,7 @@ impl<T, A: Allocator> Vec<T, A> {
14551455
ret = ptr::read(ptr);
14561456

14571457
// Shift everything down to fill in that spot.
1458-
ptr::copy(ptr.offset(1), ptr, len - index - 1);
1458+
ptr::copy(ptr.add(1), ptr, len - index - 1);
14591459
}
14601460
self.set_len(len - 1);
14611461
ret
@@ -2408,7 +2408,7 @@ impl<T, A: Allocator> Vec<T, A> {
24082408
// Write all elements except the last one
24092409
for _ in 1..n {
24102410
ptr::write(ptr, value.next());
2411-
ptr = ptr.offset(1);
2411+
ptr = ptr.add(1);
24122412
// Increment the length in every step in case next() panics
24132413
local_len.increment_len(1);
24142414
}

alloc/src/vec/spec_extend.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ where
3939
let mut local_len = SetLenOnDrop::new(&mut self.len);
4040
iterator.for_each(move |element| {
4141
ptr::write(ptr, element);
42-
ptr = ptr.offset(1);
42+
ptr = ptr.add(1);
4343
// Since the loop executes user code which can panic we have to bump the pointer
4444
// after each step.
4545
// NB can't overflow since we would have had to alloc the address space

alloc/tests/str.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
10101010
fn test_as_ptr() {
10111011
let buf = "hello".as_ptr();
10121012
unsafe {
1013-
assert_eq!(*buf.offset(0), b'h');
1014-
assert_eq!(*buf.offset(1), b'e');
1015-
assert_eq!(*buf.offset(2), b'l');
1016-
assert_eq!(*buf.offset(3), b'l');
1017-
assert_eq!(*buf.offset(4), b'o');
1013+
assert_eq!(*buf.add(0), b'h');
1014+
assert_eq!(*buf.add(1), b'e');
1015+
assert_eq!(*buf.add(2), b'l');
1016+
assert_eq!(*buf.add(3), b'l');
1017+
assert_eq!(*buf.add(4), b'o');
10181018
}
10191019
}
10201020

core/src/slice/mod.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2924,7 +2924,7 @@ impl<T> [T] {
29242924
let prev_ptr_write = ptr.add(next_write - 1);
29252925
if !same_bucket(&mut *ptr_read, &mut *prev_ptr_write) {
29262926
if next_read != next_write {
2927-
let ptr_write = prev_ptr_write.offset(1);
2927+
let ptr_write = prev_ptr_write.add(1);
29282928
mem::swap(&mut *ptr_read, &mut *ptr_write);
29292929
}
29302930
next_write += 1;

core/src/slice/sort.rs

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -326,8 +326,8 @@ where
326326
unsafe {
327327
// Branchless comparison.
328328
*end_l = i as u8;
329-
end_l = end_l.offset(!is_less(&*elem, pivot) as isize);
330-
elem = elem.offset(1);
329+
end_l = end_l.add(!is_less(&*elem, pivot) as usize);
330+
elem = elem.add(1);
331331
}
332332
}
333333
}
@@ -352,9 +352,9 @@ where
352352
// Plus, `block_r` was asserted to be less than `BLOCK` and `elem` will therefore at most be pointing to the beginning of the slice.
353353
unsafe {
354354
// Branchless comparison.
355-
elem = elem.offset(-1);
355+
elem = elem.sub(1);
356356
*end_r = i as u8;
357-
end_r = end_r.offset(is_less(&*elem, pivot) as isize);
357+
end_r = end_r.add(is_less(&*elem, pivot) as usize);
358358
}
359359
}
360360
}
@@ -365,12 +365,12 @@ where
365365
if count > 0 {
366366
macro_rules! left {
367367
() => {
368-
l.offset(*start_l as isize)
368+
l.add(*start_l as usize)
369369
};
370370
}
371371
macro_rules! right {
372372
() => {
373-
r.offset(-(*start_r as isize) - 1)
373+
r.sub((*start_r as usize) + 1)
374374
};
375375
}
376376

@@ -398,16 +398,16 @@ where
398398
ptr::copy_nonoverlapping(right!(), left!(), 1);
399399

400400
for _ in 1..count {
401-
start_l = start_l.offset(1);
401+
start_l = start_l.add(1);
402402
ptr::copy_nonoverlapping(left!(), right!(), 1);
403-
start_r = start_r.offset(1);
403+
start_r = start_r.add(1);
404404
ptr::copy_nonoverlapping(right!(), left!(), 1);
405405
}
406406

407407
ptr::copy_nonoverlapping(&tmp, right!(), 1);
408408
mem::forget(tmp);
409-
start_l = start_l.offset(1);
410-
start_r = start_r.offset(1);
409+
start_l = start_l.add(1);
410+
start_r = start_r.add(1);
411411
}
412412
}
413413

@@ -420,15 +420,15 @@ where
420420
// safe. Otherwise, the debug assertions in the `is_done` case guarantee that
421421
// `width(l, r) == block_l + block_r`, namely, that the block sizes have been adjusted to account
422422
// for the smaller number of remaining elements.
423-
l = unsafe { l.offset(block_l as isize) };
423+
l = unsafe { l.add(block_l) };
424424
}
425425

426426
if start_r == end_r {
427427
// All out-of-order elements in the right block were moved. Move to the previous block.
428428

429429
// SAFETY: Same argument as [block-width-guarantee]. Either this is a full block `2*BLOCK`-wide,
430430
// or `block_r` has been adjusted for the last handful of elements.
431-
r = unsafe { r.offset(-(block_r as isize)) };
431+
r = unsafe { r.sub(block_r) };
432432
}
433433

434434
if is_done {
@@ -457,9 +457,9 @@ where
457457
// - `offsets_l` contains valid offsets into `v` collected during the partitioning of
458458
// the last block, so the `l.offset` calls are valid.
459459
unsafe {
460-
end_l = end_l.offset(-1);
461-
ptr::swap(l.offset(*end_l as isize), r.offset(-1));
462-
r = r.offset(-1);
460+
end_l = end_l.sub(1);
461+
ptr::swap(l.add(*end_l as usize), r.sub(1));
462+
r = r.sub(1);
463463
}
464464
}
465465
width(v.as_mut_ptr(), r)
@@ -470,9 +470,9 @@ where
470470
while start_r < end_r {
471471
// SAFETY: See the reasoning in [remaining-elements-safety].
472472
unsafe {
473-
end_r = end_r.offset(-1);
474-
ptr::swap(l, r.offset(-(*end_r as isize) - 1));
475-
l = l.offset(1);
473+
end_r = end_r.sub(1);
474+
ptr::swap(l, r.sub((*end_r as usize) + 1));
475+
l = l.add(1);
476476
}
477477
}
478478
width(v.as_mut_ptr(), l)

0 commit comments

Comments (0)