 import static com.oracle.truffle.api.dsl.Cached.Exclusive;
 import static org.truffleruby.core.hash.library.HashStoreLibrary.EachEntryCallback;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
 import java.util.Set;
 
 import org.truffleruby.RubyContext;
@@ -120,10 +116,6 @@ public final class CompactHashStore {
     static final int KEY_NOT_FOUND = -2;
     private static final int HASH_NOT_FOUND = KEY_NOT_FOUND;
 
-    // a generic "not a valid array position" value to be used by all code doing array searches for things other than
-    // keys and hashes
-    private static final int INVALID_ARRAY_POSITION = Integer.MIN_VALUE;
-
     // In hash entries, not array positions (in general, capacities and sizes are always in entries)
     public static final int DEFAULT_INITIAL_CAPACITY = 8;
 
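Aside (not part of this commit): the constants above are counted in entries rather than array slots. Reconstructing the layout implied by the code elsewhere in this diff, each entry occupies two consecutive slots in both backing arrays. The sketch below is an illustration only; the field names mirror the real ones, but the class itself is hypothetical.

// Illustrative sketch of the layout this diff suggests, not the actual class body.
final class CompactLayoutSketch {
    int[] index;      // index[2n]     = hash of some entry's key
                      // index[2n + 1] = offset into kvStore (the removed rehash code reads the
                      //                 key at kvOffset - 1), or an unused/deleted sentinel
    Object[] kvStore; // kvStore[2m]     = key of the m-th inserted entry
                      //                   (null when the entry was removed, as the rehash loop assumes)
                      // kvStore[2m + 1] = its value
}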
@@ -317,54 +309,25 @@ RubyArray shift(RubyHash hash,
     @TruffleBoundary
     @ExportMessage
     void rehash(RubyHash hash,
-            @Cached @Shared HashingNodes.ToHash hashFunction,
-            @Cached CompareHashKeysNode.AssumingEqualHashes compareHashKeysNode,
             @Cached @Exclusive InlinedConditionProfile slotUsed,
-            @Cached @Exclusive InlinedConditionProfile duplicateIndexEntry,
-            @Cached @Exclusive InlinedLoopConditionProfile indexSlotUnavailable,
             @Cached @Exclusive InlinedLoopConditionProfile loopProfile,
+            @CachedLibrary("this") HashStoreLibrary hashlib,
             @Bind("$node") Node node) {
-        int[] oldIndex = index;
-        this.index = new int[oldIndex.length];
-        Map<Integer, List<Integer>> hashesToKvPositions = new HashMap<>();
+        Object[] oldKvStore = kvStore;
+        int oldKvStoreInsertionPos = kvStoreInsertionPos;
+
+        this.kvStore = new Object[oldKvStore.length];
+        this.kvStoreInsertionPos = 0;
+        this.index = new int[index.length];
+        hash.size = 0;
 
         int i = 0;
         try {
-            int numDuplicates = 0;
-            for (; loopProfile.inject(node, i < oldIndex.length); i += 2) {
-                int kvOffset = oldIndex[i + 1];
-
-                if (slotUsed.profile(node, kvOffset > INDEX_SLOT_UNUSED)) {
-                    Object key = kvStore[kvOffset - 1];
-                    Integer newHash = hashFunction.execute(key, hash.compareByIdentity);
-
-                    List<Integer> kvPositions = hashesToKvPositions.get(newHash);
-                    if (kvPositions != null) {
-                        boolean duplicate = false;
-                        for (int kvPos : kvPositions) {
-                            Object possiblyEqualKey = kvStore[kvPos];
-                            if (compareHashKeysNode.execute(hash.compareByIdentity, key, possiblyEqualKey)) {
-                                duplicate = true;
-                                break;
-                            }
-                        }
-                        if (duplicateIndexEntry.profile(node, duplicate)) {
-                            numDuplicates++;
-                            kvStore[kvOffset - 1] = null;
-                            kvStore[kvOffset] = null;
-                            continue;
-                        }
-
-                    } else {
-                        hashesToKvPositions.put(newHash, new ArrayList<>());
-                    }
-
-                    SetKvAtNode.insertIntoIndex(newHash, kvOffset, index,
-                            indexSlotUnavailable, node);
-                    hashesToKvPositions.get(newHash).add(kvOffset - 1);
+            for (; loopProfile.inject(node, i < oldKvStoreInsertionPos); i += 2) {
+                if (slotUsed.profile(node, oldKvStore[i] != null)) {
+                    hashlib.set(this, hash, oldKvStore[i], oldKvStore[i + 1], hash.compareByIdentity);
                 }
             }
-            hash.size -= numDuplicates;
         } finally {
             RubyBaseNode.profileAndReportLoopCount(node, loopProfile, i >> 1);
         }
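Aside (not part of this commit): stripped of the Truffle DSL profiling, the new rehash strategy is "reset the store, then re-insert every live key/value pair through the store's own set operation". Because every pair goes back through the normal insertion path, keys that now compare equal are merged there, which is why the removed duplicate bookkeeping (numDuplicates, hashesToKvPositions) is no longer needed. A minimal, self-contained sketch of that control flow, where reinsert stands in for the hashlib.set call and does none of the real index maintenance:

// Illustration only; reinsert(key, value) stands in for
// hashlib.set(this, hash, key, value, hash.compareByIdentity) in the real code.
final class RehashSketch {
    Object[] kvStore = new Object[16];
    int kvStoreInsertionPos = 0;
    int[] index = new int[16];

    void reinsert(Object key, Object value) {
        // The real HashStoreLibrary#set also hashes the key, claims an index slot
        // and merges duplicates; this stub only appends to kvStore.
        kvStore[kvStoreInsertionPos] = key;
        kvStore[kvStoreInsertionPos + 1] = value;
        kvStoreInsertionPos += 2;
    }

    void rehash() {
        Object[] oldKvStore = kvStore;
        int oldInsertionPos = kvStoreInsertionPos;
        kvStore = new Object[oldKvStore.length]; // empty store of the same capacity
        kvStoreInsertionPos = 0;
        index = new int[index.length];
        for (int i = 0; i < oldInsertionPos; i += 2) {
            if (oldKvStore[i] != null) {         // skip deleted entries
                reinsert(oldKvStore[i], oldKvStore[i + 1]);
            }
        }
    }
}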
@@ -527,22 +490,22 @@ abstract static class GetHashNextPosInIndexNode extends RubyBaseNode {
 
         @Specialization
         int getHashNextPos(int startingFromPos, int hash, int[] index, int stop,
-                @Cached @Exclusive InlinedConditionProfile slotIsDeleted,
+                @Cached @Exclusive InlinedConditionProfile slotIsNotDeleted,
                 @Cached @Exclusive InlinedConditionProfile slotIsUnused,
                 @Cached @Exclusive InlinedConditionProfile hashFound,
-                @Cached @Exclusive InlinedConditionProfile noValidFirstDeletedSlot,
                 @Cached @Exclusive InlinedLoopConditionProfile stopNotYetReached,
                 @Bind("$node") Node node) {
             int nextHashPos = startingFromPos;
+
             do {
                 if (slotIsUnused.profile(node, index[nextHashPos + 1] == INDEX_SLOT_UNUSED)) {
                     return HASH_NOT_FOUND;
                 }
 
-                if (slotIsDeleted.profile(node, index[nextHashPos + 1] == INDEX_SLOT_DELETED)) {
-                    // next
-                } else if (hashFound.profile(node, index[nextHashPos] == hash)) {
-                    return nextHashPos;
+                if (slotIsNotDeleted.profile(node, index[nextHashPos + 1] != INDEX_SLOT_DELETED)) {
+                    if (hashFound.profile(node, index[nextHashPos] == hash)) {
+                        return nextHashPos;
+                    }
                 }
 
                 nextHashPos = incrementIndexPos(nextHashPos, index.length);
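Aside (not part of this commit): the rewritten body is a standard open-addressing probe: an unused slot ends the search, a deleted slot is skipped while probing continues, and a live slot matches when its stored hash equals the one being looked up. The self-contained sketch below mirrors the sentinel checks and the two-ints-per-entry step visible in this diff; the concrete sentinel values and the stop-after-one-full-wrap termination are assumptions for illustration (the real loop terminates via the stop parameter, whose handling is outside this hunk).

// Standalone sketch of the probe loop; sentinel values and the wrap-around
// termination are assumed, the rest follows the diff above.
final class ProbeSketch {
    static final int INDEX_SLOT_UNUSED = 0;   // assumed sentinel values
    static final int INDEX_SLOT_DELETED = -1; // assumed
    static final int HASH_NOT_FOUND = -2;     // matches KEY_NOT_FOUND in the diff

    static int getHashNextPos(int[] index, int startingFromPos, int hash) {
        int pos = startingFromPos;
        do {
            if (index[pos + 1] == INDEX_SLOT_UNUSED) {
                return HASH_NOT_FOUND;        // empty slot: the hash was never inserted past here
            }
            if (index[pos + 1] != INDEX_SLOT_DELETED && index[pos] == hash) {
                return pos;                   // live slot whose stored hash matches
            }
            pos = (pos + 2) % index.length;   // next [hash, kvOffset] pair, wrapping around
        } while (pos != startingFromPos);
        return HASH_NOT_FOUND;
    }
}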
@@ -595,7 +558,7 @@ boolean keyAlreadyExistsWithDifferentValue(
             return false;
         }
 
-        // setting the key is a relatively expensive insertion
+        // setting a new key is a relatively expensive insertion
         @Specialization(guards = "kvPos == KEY_NOT_FOUND")
         static boolean keyDoesntExist(
                 RubyHash hash, CompactHashStore store, int kvPos, int keyHash, Object frozenKey, Object value,