
Commit 381dd5d

Auto merge of #558 - waywardmonkeys:reduce-typo-count, r=cuviper
Reduce typo count. This renames two identifiers, one in the benches and one in the Rayon support. Neither is public.
2 parents 0011336 + a00bb9f

File tree

4 files changed: +54 −54 lines

- CHANGELOG.md
- benches/set_ops.rs
- src/external_trait_impls/rayon/map.rs
- src/raw/mod.rs

CHANGELOG.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -68,7 +68,7 @@ This release was _yanked_ due to a breaking change.
 - Optimized implementation for ARM using NEON instructions. (#430)
 - Support for rkyv serialization. (#432)
 - `Equivalent` trait to look up values without `Borrow`. (#345)
-- `Hash{Map,Set}::raw_table_mut` is added whic returns a mutable reference. (#404)
+- `Hash{Map,Set}::raw_table_mut` is added which returns a mutable reference. (#404)
 - Fast path for `clear` on empty tables. (#428)
 
 ### Changed
```

benches/set_ops.rs

Lines changed: 21 additions & 21 deletions
```diff
@@ -19,7 +19,7 @@ const LARGE_SET_SIZE: usize = 1000;
 const SMALL_SET_SIZE: usize = 100;
 
 /// The number of keys present in both sets.
-const OVERLAPP: usize =
+const OVERLAP: usize =
     [LARGE_SET_SIZE, SMALL_SET_SIZE][(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize] / 2;
 
 /// Creates a set containing end - start unique string elements.
```
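The renamed `OVERLAP` constant uses a bool-to-index trick: `(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize` is `0` or `1`, so indexing the two-element array selects whichever size is larger. A minimal standalone sketch of the idiom (the helper name `max_by_indexing` is hypothetical, not part of the benchmark):

```rust
// Indexing a two-element array with a bool cast: the index is 1 exactly
// when `a < b`, so the expression picks the larger of the two arguments.
fn max_by_indexing(a: usize, b: usize) -> usize {
    [a, b][(a < b) as usize]
}

fn main() {
    assert_eq!(max_by_indexing(100, 1000), 1000);
    // With the benchmark's sizes (1000 and 100), OVERLAP = 1000 / 2 = 500,
    // so the small set below is created over the range 500..600.
    assert_eq!(max_by_indexing(1000, 100) / 2, 500);
}
```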
```diff
@@ -31,8 +31,8 @@ fn create_set(start: usize, end: usize) -> HashSet<String> {
 fn set_ops_bit_or(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set | &small_set)
 }
@@ -41,8 +41,8 @@ fn set_ops_bit_or(b: &mut Bencher) {
 fn set_ops_bit_and(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set & &small_set)
 }
@@ -51,8 +51,8 @@ fn set_ops_bit_and(b: &mut Bencher) {
 fn set_ops_bit_xor(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set ^ &small_set)
 }
@@ -61,8 +61,8 @@ fn set_ops_bit_xor(b: &mut Bencher) {
 fn set_ops_sub_large_small(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set - &small_set)
 }
@@ -71,8 +71,8 @@ fn set_ops_sub_large_small(b: &mut Bencher) {
 fn set_ops_sub_small_large(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &small_set - &large_set)
 }
@@ -81,8 +81,8 @@ fn set_ops_sub_small_large(b: &mut Bencher) {
 fn set_ops_bit_or_assign(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = large_set.clone();
@@ -95,8 +95,8 @@ fn set_ops_bit_or_assign(b: &mut Bencher) {
 fn set_ops_bit_and_assign(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = small_set.clone();
@@ -109,8 +109,8 @@ fn set_ops_bit_and_assign(b: &mut Bencher) {
 fn set_ops_bit_xor_assign(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
    );
     b.iter(|| {
         let mut set = large_set.clone();
@@ -123,8 +123,8 @@ fn set_ops_bit_xor_assign(b: &mut Bencher) {
 fn set_ops_sub_assign_large_small(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = large_set.clone();
@@ -137,8 +137,8 @@ fn set_ops_sub_assign_large_small(b: &mut Bencher) {
 fn set_ops_sub_assign_small_large(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = small_set.clone();
```

src/external_trait_impls/rayon/map.rs

Lines changed: 14 additions & 14 deletions
```diff
@@ -472,32 +472,32 @@ mod test_par_map {
 
     use crate::hash_map::HashMap;
 
-    struct Dropable<'a> {
+    struct Droppable<'a> {
         k: usize,
         counter: &'a AtomicUsize,
     }
 
-    impl Dropable<'_> {
-        fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> {
+    impl Droppable<'_> {
+        fn new(k: usize, counter: &AtomicUsize) -> Droppable<'_> {
             counter.fetch_add(1, Ordering::Relaxed);
 
-            Dropable { k, counter }
+            Droppable { k, counter }
         }
     }
 
-    impl Drop for Dropable<'_> {
+    impl Drop for Droppable<'_> {
         fn drop(&mut self) {
             self.counter.fetch_sub(1, Ordering::Relaxed);
         }
     }
 
-    impl Clone for Dropable<'_> {
+    impl Clone for Droppable<'_> {
         fn clone(&self) -> Self {
-            Dropable::new(self.k, self.counter)
+            Droppable::new(self.k, self.counter)
         }
     }
 
-    impl Hash for Dropable<'_> {
+    impl Hash for Droppable<'_> {
         fn hash<H>(&self, state: &mut H)
         where
             H: Hasher,
@@ -506,13 +506,13 @@ mod test_par_map {
         }
     }
 
-    impl PartialEq for Dropable<'_> {
+    impl PartialEq for Droppable<'_> {
         fn eq(&self, other: &Self) -> bool {
             self.k == other.k
         }
     }
 
-    impl Eq for Dropable<'_> {}
+    impl Eq for Droppable<'_> {}
 
     #[test]
     fn test_into_iter_drops() {
@@ -526,8 +526,8 @@ mod test_par_map {
         assert_eq!(value.load(Ordering::Relaxed), 0);
 
         for i in 0..100 {
-            let d1 = Dropable::new(i, &key);
-            let d2 = Dropable::new(i + 100, &value);
+            let d1 = Droppable::new(i, &key);
+            let d2 = Droppable::new(i + 100, &value);
             hm.insert(d1, d2);
         }
 
@@ -573,8 +573,8 @@ mod test_par_map {
         assert_eq!(value.load(Ordering::Relaxed), 0);
 
         for i in 0..100 {
-            let d1 = Dropable::new(i, &key);
-            let d2 = Dropable::new(i + 100, &value);
+            let d1 = Droppable::new(i, &key);
+            let d2 = Droppable::new(i + 100, &value);
             hm.insert(d1, d2);
         }
 
```
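The renamed `Droppable` type is a drop-counting test helper: construction increments a shared `AtomicUsize` and `Drop` decrements it, so a counter back at zero after the map is consumed proves every element was dropped exactly once. A self-contained sketch of the pattern, simplified from the test above (no Rayon involved):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct Droppable<'a> {
    counter: &'a AtomicUsize,
}

impl<'a> Droppable<'a> {
    fn new(counter: &'a AtomicUsize) -> Self {
        // One live instance more.
        counter.fetch_add(1, Ordering::Relaxed);
        Droppable { counter }
    }
}

impl Drop for Droppable<'_> {
    fn drop(&mut self) {
        // One live instance fewer.
        self.counter.fetch_sub(1, Ordering::Relaxed);
    }
}

fn main() {
    let live = AtomicUsize::new(0);
    let items: Vec<_> = (0..100).map(|_| Droppable::new(&live)).collect();
    assert_eq!(live.load(Ordering::Relaxed), 100);
    drop(items);
    // Back at zero: each element was dropped exactly once.
    assert_eq!(live.load(Ordering::Relaxed), 0);
}
```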

src/raw/mod.rs

Lines changed: 18 additions & 18 deletions
```diff
@@ -135,7 +135,7 @@ fn h1(hash: u64) -> usize {
     hash as usize
 }
 
-// Constant for h2 function that grabing the top 7 bits of the hash.
+// Constant for h2 function that grabs the top 7 bits of the hash.
 const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
     mem::size_of::<usize>()
 } else {
```
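For context, `MIN_HASH_LEN` is the smaller of `size_of::<usize>()` and `size_of::<u64>()`, so `h2` can take the 7 highest bits that are meaningful on both 32-bit and 64-bit targets. A simplified standalone sketch of that idea (an assumption-labeled rewrite, not copied verbatim from hashbrown):

```rust
// On 32-bit targets only the low `usize` bytes of the 64-bit hash are
// meaningful, so the shift is computed from the smaller of the two widths.
const MIN_HASH_LEN: usize = if std::mem::size_of::<usize>() < std::mem::size_of::<u64>() {
    std::mem::size_of::<usize>()
} else {
    std::mem::size_of::<u64>()
};

fn h2(hash: u64) -> u8 {
    // Shift so the top 7 meaningful bits land in the low bits, then mask.
    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
    (top7 & 0x7f) as u8
}

fn main() {
    assert_eq!(h2(0), 0);
    assert!(h2(u64::MAX) <= 0x7f); // always fits in 7 bits
}
```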
```diff
@@ -433,7 +433,7 @@ impl<T> Bucket<T> {
     // mem::size_of::<T>()
     //   |
     //   |  `self = from_base_index(base, 5)` that returns pointer
-    //   |  that points here in tha data part of the table
+    //   |  that points here in the data part of the table
     //   |  (to the end of T5)
     //   |  |  `base: NonNull<T>` must point here
     //   v  |  (to the end of T0 or to the start of C0)
@@ -504,15 +504,15 @@ impl<T> Bucket<T> {
     ///
     /// * `self` contained pointer must not be `dangling`;
     ///
-    /// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
-    ///   i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other
-    ///   words, `self.to_base_index() + ofset + 1` must be no greater than the number returned
+    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
+    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
     ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
     ///
     /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
-    /// `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
-    /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other words,
-    /// `self.to_base_index() + ofset + 1` must be no greater than the number returned by the
+    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
+    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
     /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
     ///
     /// [`Bucket`]: crate::raw::Bucket
```
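The corrected requirement reads as a plain bounds check: since `buckets() == bucket_mask + 1`, the conditions `to_base_index() + offset <= bucket_mask` and `to_base_index() + offset + 1 <= buckets()` are the same. A tiny illustration (hypothetical helper, not hashbrown API):

```rust
// Returns true when the bucket reachable via `offset` stays inside the table.
fn next_n_in_bounds(base_index: usize, offset: usize, bucket_mask: usize) -> bool {
    // Equivalent to `base_index + offset + 1 <= bucket_mask + 1`.
    base_index + offset <= bucket_mask
}

fn main() {
    let bucket_mask = 8 - 1; // a table with 8 buckets
    assert!(next_n_in_bounds(5, 2, bucket_mask)); // index 7: in range
    assert!(!next_n_in_bounds(5, 3, bucket_mask)); // index 8: out of range
}
```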
```diff
@@ -562,7 +562,7 @@ impl<T> Bucket<T> {
     ///
     /// You should use [`RawTable::remove`] instead of this function,
     /// or be careful with calling this function directly, because compiler
-    /// calls its destructor when readed `value` goes out of scope. It
+    /// calls its destructor when the read `value` goes out of scope. It
     /// can cause double dropping when [`RawTable`] goes out of scope,
     /// because of not erased `data control byte`.
     ///
@@ -1736,8 +1736,8 @@ impl RawTableInner {
         // * Caller of this function ensures that the control bytes are properly initialized.
         //
         // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
-        //   of the table due to masking with `self.bucket_mask` and also because mumber of
-        //   buckets is a power of two (see `self.probe_seq` function).
+        //   of the table due to masking with `self.bucket_mask` and also because the number
+        //   of buckets is a power of two (see `self.probe_seq` function).
         //
         // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
         //   call `Group::load` due to the extended control bytes range, which is
```
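The power-of-two requirement is what makes the masking argument work: `pos & bucket_mask` equals `pos % buckets` exactly when the bucket count is a power of two, so every probe position is a valid index. A minimal sketch of a triangular probe sequence under that assumption (names mirror the comment; the group width value is an assumption for illustration):

```rust
const GROUP_WIDTH: usize = 16; // e.g. an SSE2-sized group; assumed here

struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    fn move_next(&mut self, bucket_mask: usize) {
        // Triangular probing: strides grow by the group width each step.
        self.stride += GROUP_WIDTH;
        self.pos += self.stride;
        // With a power-of-two bucket count, masking keeps `pos` in range.
        self.pos &= bucket_mask;
    }
}

fn main() {
    let bucket_mask = 64 - 1; // 64 buckets, a power of two
    let mut seq = ProbeSeq { pos: 5, stride: 0 };
    for _ in 0..100 {
        seq.move_next(bucket_mask);
        assert!(seq.pos <= bucket_mask); // never exceeds bucket_mask
    }
}
```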
```diff
@@ -1788,7 +1788,7 @@ impl RawTableInner {
     ///
     /// This function does not check if the given element exists in the table. Also,
     /// this function does not check if there is enough space in the table to insert
-    /// a new element. Caller of the funtion must make ensure that the table has at
+    /// a new element. The caller of the function must make sure that the table has at
     /// least 1 empty or deleted `bucket`, otherwise this function will never return
     /// (will go into an infinite loop) for tables larger than the group width, or
     /// return an index outside of the table indices range if the table is less than
@@ -1885,8 +1885,8 @@ impl RawTableInner {
         // * Caller of this function ensures that the control bytes are properly initialized.
         //
         // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
-        //   of the table due to masking with `self.bucket_mask` and also because mumber of
-        //   buckets is a power of two (see `self.probe_seq` function).
+        //   of the table due to masking with `self.bucket_mask` and also because the number
+        //   of buckets is a power of two (see `self.probe_seq` function).
         //
         // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
         //   call `Group::load` due to the extended control bytes range, which is
@@ -3171,7 +3171,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
         // Avoid `Result::ok_or_else` because it bloats LLVM IR.
         //
         // SAFETY: This is safe as we are taking the size of an already allocated table
-        // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power
+        // and therefore capacity overflow cannot occur, `self.table.buckets()` is power
         // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
         let mut new_table = match Self::new_uninitialized(
             self.alloc.clone(),
@@ -3185,11 +3185,11 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
         // Cloning elements may fail (the clone function may panic). But we don't
         // need to worry about uninitialized control bits, since:
         // 1. The number of items (elements) in the table is zero, which means that
-        //    the control bits will not be readed by Drop function.
+        //    the control bits will not be read by Drop function.
         // 2. The `clone_from_spec` method will first copy all control bits from
         //    `self` (thus initializing them). But this will not affect the `Drop`
         //    function, since the `clone_from_spec` function sets `items` only after
-        //    successfully clonning all elements.
+        //    successfully cloning all elements.
         new_table.clone_from_spec(self);
         new_table
     }
```
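A simplified model of the invariant this comment relies on (assumed names; hashbrown's real code additionally uses a panic guard): `Drop` reads only as many slots as `items` claims are initialized, and `items` is published only after all clones succeed, so a panicking `clone` can at worst leak already-cloned elements, never touch uninitialized memory:

```rust
use std::mem::MaybeUninit;

struct MiniTable<T> {
    slots: Box<[MaybeUninit<T>]>,
    items: usize, // number of initialized slots; Drop trusts this
}

impl<T> Drop for MiniTable<T> {
    fn drop(&mut self) {
        for slot in &mut self.slots[..self.items] {
            // SAFETY: the first `items` slots are initialized by construction.
            unsafe { slot.assume_init_drop() };
        }
    }
}

impl<T: Clone> MiniTable<T> {
    fn clone_from_slice(src: &[T]) -> Self {
        let mut table = MiniTable {
            slots: (0..src.len()).map(|_| MaybeUninit::uninit()).collect(),
            items: 0, // if a clone below panics, Drop reads nothing
        };
        for (i, value) in src.iter().enumerate() {
            table.slots[i] = MaybeUninit::new(value.clone()); // may panic
        }
        // Published only after every clone succeeded.
        table.items = src.len();
        table
    }
}

fn main() {
    let t = MiniTable::clone_from_slice(&[String::from("a"), String::from("b")]);
    assert_eq!(t.items, 2);
}
```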
```diff
@@ -3587,7 +3587,7 @@ impl<T> RawIterRange<T> {
         // start of the array of control bytes, and never try to iterate after
         // getting all the elements, the last `self.current_group` will read bytes
         // from the `self.buckets() - Group::WIDTH` index. We know also that
-        // `self.current_group.next()` will always retun indices within the range
+        // `self.current_group.next()` will always return indices within the range
        // `0..Group::WIDTH`.
         //
         // Knowing all of the above and taking into account that we are synchronizing
```
