@@ -1523,43 +1523,92 @@ impl<T, A: Allocator> Vec<T, A> {
            return;
        }

-        let ptr = self.as_mut_ptr();
-        /* Offset of the element we want to check if it is duplicate */
-        let mut read: usize = 1;
-        /* Offset of the place where we want to place the non-duplicate
-         * when we find it. */
-        let mut write: usize = 1;
+        /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+        struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
+            /* Offset of the element we want to check if it is a duplicate */
+            read: usize,
+
+            /* Offset of the place where we want to place the non-duplicate
+             * when we find it. */
+            write: usize,
+
+            /* The Vec that would need correction if `same_bucket` panicked */
+            vec: &'a mut Vec<T, A>,
+        }
+
+        impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
+            fn drop(&mut self) {
+                /* This code gets executed either at the end of `dedup_by` or
+                 * when `same_bucket` panics */
+
+                /* SAFETY (if finishing successfully): self.read == len, so
+                 * no data is copied and the length is set correctly */
+
+                /* SAFETY (if panicking): the invariant guarantees that
+                 * `read - write` and `len - read` never overflow and that
+                 * the copy is always in-bounds. */
+                unsafe {
+                    let ptr = self.vec.as_mut_ptr();
+                    let len = self.vec.len();
+
+                    /* How many items were left when `same_bucket` panicked.
+                     * Basically vec[read..].len() */
+                    let items_left = len - self.read;
+
+                    /* Pointer to first item in vec[write..write+items_left] slice */
+                    let dropped_ptr = ptr.add(self.write);
+                    /* Pointer to first item in vec[read..] slice */
+                    let valid_ptr = ptr.add(self.read);
+
+                    /* Copy `vec[read..]` to `vec[write..write+items_left]`.
+                     * The slices can overlap, so `copy_nonoverlapping` cannot be used */
+                    ptr::copy(valid_ptr, dropped_ptr, items_left);
+
+                    /* How many items have already been dropped.
+                     * Basically vec[write..read].len() */
+                    let dropped = self.read - self.write;
+
+                    self.vec.set_len(len - dropped);
+                }
+            }
+        }
+
+        let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
+
+        let ptr = gap.vec.as_mut_ptr();

        /* Drop items while going through the Vec; it should be more efficient
         * than doing slice partition_dedup + truncate */

-        /* INVARIANT: len > read >= write > write-1 >= 0
-         * SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
+        /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
         * are always in-bounds and read_ptr never aliases prev_ptr */
        unsafe {
-            while read < len {
-                let read_ptr = ptr.add(read);
-                let prev_ptr = ptr.add(write.wrapping_sub(1));
+            while gap.read < len {
+                let read_ptr = ptr.add(gap.read);
+                let prev_ptr = ptr.add(gap.write.wrapping_sub(1));

                if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
                    /* We have found a duplicate, drop it in-place */
                    ptr::drop_in_place(read_ptr);
                } else {
-                    let write_ptr = ptr.add(write);
+                    let write_ptr = ptr.add(gap.write);

                    /* Looks like doing just `copy` can be faster than
                     * conditional `copy_nonoverlapping` */
                    ptr::copy(read_ptr, write_ptr, 1);

                    /* We have filled that place, so go further */
-                    write += 1;
+                    gap.write += 1;
                }

-                read += 1;
+                gap.read += 1;
            }

-            /* `write` items are inside vec, rest is already dropped */
-            self.set_len(write);
+            /* Technically we could let `gap` clean up with its Drop, but
+             * when `same_bucket` is guaranteed not to panic, this bloats
+             * the codegen a little, so we just do it manually */
+            gap.vec.set_len(gap.write);
+            mem::forget(gap);
        }
    }
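The pattern at work here is a drop guard: the scan's cursors live inside a guard object whose `Drop` repairs the `Vec` if `same_bucket` unwinds, and the success path skips the repair with `mem::forget`. Below is a minimal standalone sketch of the same idiom; all names (`ZeroTailOnDrop`, `map_or_zero_tail`) are illustrative and not part of this patch.

```rust
use std::mem;

/// Illustrative guard: the loop's cursor lives inside the guard so that
/// `Drop` can see how far the interrupted loop got.
struct ZeroTailOnDrop<'a> {
    /// Index of the first element not yet processed.
    cursor: usize,
    data: &'a mut [u64],
}

impl Drop for ZeroTailOnDrop<'_> {
    fn drop(&mut self) {
        // Panic path: leave the slice in a well-defined state by zeroing
        // everything the interrupted loop did not reach.
        for x in &mut self.data[self.cursor..] {
            *x = 0;
        }
    }
}

/// Applies a caller-supplied `step` to each element; if `step` panics,
/// the unprocessed tail is zeroed instead of being left half-updated.
fn map_or_zero_tail(data: &mut [u64], mut step: impl FnMut(u64) -> u64) {
    let mut guard = ZeroTailOnDrop { cursor: 0, data };
    while guard.cursor < guard.data.len() {
        guard.data[guard.cursor] = step(guard.data[guard.cursor]);
        guard.cursor += 1;
    }
    // Success path: skip the repair in `Drop`, exactly like the
    // `mem::forget(gap)` at the end of `dedup_by`.
    mem::forget(guard);
}

fn main() {
    let mut data = [1, 2, 3, 4];
    map_or_zero_tail(&mut data, |x| x * 10);
    assert_eq!(data, [10, 20, 30, 40]);
}
```

On a panic inside `step`, unwinding runs the guard's `Drop`, which sees exactly how far the loop got, just as `FillGapOnDrop` uses `read` and `write` to copy `vec[read..]` down over the gap and shrink the length.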
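And a drop-count check of the property the patch establishes, assuming the patched implementation above; this test is hypothetical, not part of the commit. Even when `same_bucket` panics mid-scan, every element is dropped exactly once and the `Vec` stays usable.

```rust
use std::cell::Cell;
use std::panic::{self, AssertUnwindSafe};
use std::rc::Rc;

/// Element that records how many times it has been dropped.
struct Tracked {
    key: u32,
    drops: Rc<Cell<u32>>,
}

impl Drop for Tracked {
    fn drop(&mut self) {
        self.drops.set(self.drops.get() + 1);
    }
}

fn main() {
    let drops = Rc::new(Cell::new(0));
    let t = |key| Tracked { key, drops: Rc::clone(&drops) };
    let mut v = vec![t(1), t(1), t(2), t(3), t(3)];

    let mut calls = 0;
    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        v.dedup_by(|a, b| {
            calls += 1;
            // Panic on the third comparison, mid-way through the scan.
            if calls == 3 {
                panic!("same_bucket panicked");
            }
            a.key == b.key
        });
    }));
    assert!(result.is_err());

    // One duplicate was dropped in-place before the panic; the guard then
    // moved the unexamined tail over the gap and fixed the length.
    assert_eq!(drops.get(), 1);

    // The Vec is still fully valid: dropping it frees the remaining
    // elements, so in total every element is dropped exactly once.
    drop(v);
    assert_eq!(drops.get(), 5);
}
```

Before this patch, the panic would unwind with the gap still inside the buffer, so dropping `v` afterwards could touch an already-dropped element; the guard's `Drop` is what makes the final assertion hold.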