1 // Written in the D programming language.
2 
3 module swisstable.table;
4 
5 import swisstable.group;
6 import swisstable.hash;
7 
8 /**
9 Swiss tables internal data structure and functions
10 
This mixin template is used in container structs.
12 
13 Params:
14  Container = Container type which uses this template
15  KeyType = Key type for hash.
16  SlotType = Slot type, e.g. Tuple in Map
17  Allocator = Allocator type for internal allocation. Default is "shared const Mallocator"
18  CallGCRange = If true, call GC's addRange/removeRange for stored data
19 
NOTE: Containers based on this table don't guarantee pointer stability. Ranges and returned pointers are invalidated by insert/rehash.
21 
22 Copyright: Copyright 2024- Masahiro Nakagawa
Authors:   Masahiro Nakagawa
24 License:   $(HTTP www.apache.org/licenses/LICENSE-2.0, Apache License Version 2.0)
25 Credits:   Internal design and logic are based on abseil's hash map/set. See also
26            $(HTTP abseil.io/about/design/swisstables, Swiss Tables Design Notes) and $(HTTP github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h, abseil's source)
27  */
28 mixin template Table(Container, KeyType, SlotType, Allocator, alias CallGCRange)
29 {
30     import core.memory : GC;
31     import std.algorithm.comparison : max;
32     import std.experimental.allocator.common : stateSize;
33 
34     enum HeapAlignment = max(GrowthInfo.alignof, SlotType.alignof);
35 
36   private:
37     /*
     * The internal table consists of growth info, control bytes and actual data slots.
     * The container allocates one large chunk of memory to store this information.
40      *
41      * Here is a layout of the table:
42      * {
43      *     // The available slots before growing the capacity. Managed by GrowthInfo object.
44      *     size_t growthInfo;
     *     // The control bytes corresponding to actual data `slots`.
     *     // `_control` member variable points to the first address of this member.
     *     Control[capacity] controls;
     *     // Sentinel for control bytes. Range / iterate operations use this marker as a stopper.
     *     // TODO: We can remove this sentinel by accurate offset control.
     *     Control sentinel = Control.Sentinel;
     *     // Copy of the first 'Group.Width - 1' bytes of `controls` above.
     *     // Group can safely scan control bytes past the end thanks to this member.
     *     Control[Group.Width - 1] clonedControls;
     *     // Data slots to store actual key-value pairs.
     *     // `_slots` member variable points to this member.
     *     SlotType[capacity] slots;
57      * }
58      */
59     size_t _capacity, _size;
60     Control* _control = emptyGroup();
61     SlotType[] _slots;
62     Allocator _allocator;
    size_t _hashSeed = RapidSeed; // Use a hasher object for various hash support?
64 
65     invariant
66     {
67         assert(_capacity == 0 || isValidCapacity(_capacity));
68     }
69 
70   public @trusted:
71     static if (stateSize!Allocator > 0) {
72         // If Allocator has an internal state, Container must be initialized with an allocator object.
73         @disable this();
74 
75         /// Constructor with the Allocator.
76         this(Allocator allocator) nothrow
77         {
78             _allocator = allocator;
79         }
80 
        /// Constructor taking an initial capacity and the Allocator. The given capacity is automatically normalized to a proper value.
82         this(Allocator allocator, size_t capacity) nothrow
83         {
84             _allocator = allocator;
85             resize(normalizeCapacity(capacity));
86         }
87     } else {
        /// Constructor taking an initial capacity. The given capacity is automatically normalized to a proper value.
89         this(size_t capacity) nothrow
90         {
91             resize(normalizeCapacity(capacity));
92         }
93     }
94 
95     ~this()
96     {
97         clear(false);
98     }
99 
100     /// Returns: the number of stored slots in the container.
101     @property size_t length() const nothrow { return _size; }
102 
    /// Returns: the current capacity.
104     @property size_t capacity() const nothrow { return _capacity; }
105 
106     /// Setter for hash seed.
107     @property void hashSeed(size_t seed)
108     {
109         import std.exception : enforce;
110 
111         enforce(_size == 0, "Can't set new hash seed for non-empty table");
112         _hashSeed = seed;
113     }
114 
115     /**
     * Returns: a new Container with the same capacity and a copy of this Container's contents.
117      */
118     Container dup() const nothrow
119     {
120         import core.stdc.string : memcpy;
121 
122         static if (stateSize!Allocator)
123             Container result = Container(_allocator, _capacity);
124         else
125             Container result = Container(_capacity);
126         auto layout = TableLayout(_capacity, HeapAlignment);
127 
128         result._size = _size;
129         result._hashSeed = _hashSeed;
130         memcpy(result._control - layout.controlOffset, _control - layout.controlOffset,
131                layout.allocSize(SlotType.sizeof));
132 
133         return result;
134     }
135 
136     /**
137      * Reorganizes the container in place.
138      *
139      * Params:
     *  cap = new capacity hint. Passing 0 forces a rehash at the minimal capacity for the current size.
141      */
142     void rehash(size_t cap = 0) nothrow
143     {
144         if (cap == 0) {
145             if (_capacity == 0 || _size == 0)
146                 return;
147         }
148 
149         auto newCap = normalizeCapacity(max(cap, growthToLowerboundCapacity(_size)));
150         if (cap == 0 || newCap > _capacity)
151             resize(newCap);
152     }
153 
154     /**
155      * Removes all remaining slots from a container.
156      *
157      * Params:
158      *  reuse = If true, keep allocated memory and reset metadata. Otherwise all resources are disposed.
159      */
160     void clear(bool reuse = true) nothrow
161     {
162         import std.experimental.allocator : dispose;
163 
164         if (_capacity > 0) {
165             destroySlots();
166 
167             if (reuse) {
168                 resetCtrl(_control, _capacity);
169                 _size = 0;
170             } else {
171                 auto layout = TableLayout(_capacity, HeapAlignment);
172                 auto allocSize = layout.allocSize(SlotType.sizeof);
173                 auto data = cast(ubyte[])(_control - layout.controlOffset)[0..allocSize];
174 
175                 static if (CallGCRange)
176                     GC.removeRange(data.ptr);
177                 _allocator.dispose(data);
178 
179                 _size = _capacity = 0;
180                 _control = emptyGroup();
181                 _slots = null;
182             }
183         }
184     }
185 
186   private @trusted:
    // Actual collection code for `keys()` and `values()`
188     auto funcKeysValuesBody(Elem, string putBody)() const nothrow
189     {
        // import std.algorithm.mutation : move; This move doesn't support void[0]
191         import core.lifetime : move;
192         import std.array : appender;
193 
194         auto ctrl = cast(Control*)_control;
195         auto slot = cast(SlotType*)_slots.ptr;
196         auto res = appender!(Elem[])();
197 
198         skipEmptyOrTombstoneSlots(ctrl, slot);
199         while (*ctrl != Control.Sentinel) {
200             res.put(move(mixin(putBody)));
201             ++ctrl;
202             ++slot;
203             skipEmptyOrTombstoneSlots(ctrl, slot);
204         }
205 
206         return res.data;
207     }
208 
209     // Called by Range and iterate functions
210     static void skipEmptyOrTombstoneSlots(ref Control* ctrl, ref SlotType* slot) nothrow
211     {
212         while ((*ctrl).isEmptyOrTombstone) {
213             uint shift = Group(ctrl).countLeadingEmptyOrTombstone();
214             ctrl += shift;
215             slot += shift;
216         }
217     }
218 
    /// Helper struct for calculating table layout and offsets
220     static struct TableLayout
221     {
222       private:
223         size_t _capacity, _slotOffset;
224 
225       public nothrow @safe:
226         this(size_t capacity, size_t slotAlign)
227         {
228             _capacity = capacity;
229             _slotOffset = (controlOffset() + numControlBytes(capacity) + slotAlign - 1) & (~slotAlign + 1);
230         }
231 
232         @property size_t capacity() const { return _capacity; }
233         @property size_t slotOffset() const { return _slotOffset; }
234         @property size_t controlOffset() const { return GrowthInfo.sizeof; }
235         @property size_t allocSize(size_t slotSize) const { return _slotOffset + (_capacity * slotSize); }
236     }
237 
238     unittest
239     {
240         auto layout = TableLayout(7, 8);
241         assert(layout.capacity == 7);
242         assert(layout.controlOffset == GrowthInfo.sizeof);
        // GroupSSE2 and GroupPortable have different widths
244         version (D_SIMD)
245         {
246             assert(layout.slotOffset == 32);
247             assert(layout.allocSize(16) == 144);
248         }
249         else
250         {
251             assert(layout.slotOffset == 24);
252             assert(layout.allocSize(16) == 136);
253         }
254     }
255 
256     /**
     * Destroy existing slots to clean up resources.
258      */
259     void destroySlots() nothrow
260     {
261         // TODO : Skip when K and V don't have destructors
262         auto ctrl = _control;
263         auto slots = _slots.ptr;
264 
265         if (_capacity < (Group.Width - 1)) {  // Small table case
266             auto m = GroupPortable(ctrl + _capacity).maskFull();  // use GroupPortable for small table
267             --slots;
268             foreach (i; m)
269                 destroy(slots[i]);
270 
271             return;
272         } else {
273             auto remaining = _size;
274             while (remaining != 0) {
275                 foreach (uint i; Group(ctrl).maskFull()) {
276                     destroy(slots[i]);
277                     --remaining;
278                 }
279                 ctrl += Group.Width;
280                 slots += Group.Width;
281             }
282         }
283     }
284 
    // Helper for the remove operation: returns true when the erased slot can be
    // reset to Empty instead of Tombstone, because the surrounding empties prove
    // no probe sequence ever passed over `index` while the table was full there.
286     static bool wasNeverFull(const Control* ctrl, size_t cap, size_t index) nothrow
287     in
288     {
289         assert(isValidCapacity(cap));
290     }
291     do
292     {
293         const size_t indexBefore = (index - Group.Width) & cap;
294         const emptyAfter = Group(ctrl + index).maskEmpty();
295         const emptyBefore = Group(ctrl + indexBefore).maskEmpty();
296 
297         return emptyBefore && emptyAfter &&
298             (emptyAfter.trailingZeros() + emptyBefore.leadingZeros()) < Group.Width;
299     }
300 
301     /**
     * Attempt to find key. The hash is split into H1, which seeds the probe
     * sequence, and H2, a 7-bit tag matched against the control bytes.
303      *
304      * Params:
305      *  key = key to search for
306      *  found = indicate whether key is found or not
307      *
308      * Returns: slot index
309      */
310     size_t find(const ref KeyType key, out bool found) const
311     {
312         auto hash = rapidhashOf(key, _hashSeed);
313         auto seq = probe(_capacity, hash);
314 
315         found = false;
316         while (true) {
317             auto g = Group(_control + seq.offset);
318             foreach (i; g.match(calcH2(hash))) {
319                 auto offset = seq.offset(i);
320                 if (mixin(CompCondition)) {  // why Object.opEquals is not 'nothrow'...
321                     found = true;
322                     return offset;
323                 }
324             }
325             if (g.maskEmpty())
326                 return 0;
327 
328             seq.next();
329         }
330 
331         assert(0);
332     }
333 
334     /**
     * Attempt to find key. If the key is not found, prepare a new slot and update the control bytes.
336      *
337      * Params:
338      *  key = key to search for
339      *  found = indicate whether key is found or not
340      *
341      * Returns: insertable/updatable slot index
342      */
343     size_t findOrPrepareInsert(const ref KeyType key, out bool found)
344     {
345         auto hash = rapidhashOf(key, _hashSeed);
346         auto seq = probe(_capacity, hash);
347 
348         found = false;
349         while (true) {
350             auto g = Group(_control + seq.offset);
351             foreach (i; g.match(calcH2(hash))) {
352                 auto offset = seq.offset(i);
353                 if (mixin(CompCondition)) {
354                     found = true;
355                     return offset;
356                 }
357             }
358             auto maskEmpty = g.maskEmpty();
359             if (maskEmpty) {
360                 auto target = seq.offset(maskEmpty.lowestBitSet());
361                 return prepareInsert(hash, target);
362             }
363 
364             seq.next();
365         }
366 
367         assert(0);
368     }
369 
370     /**
     * Find a slot index for insertion. If there is no available space, the table
     * may be resized or its tombstones dropped.
     *
     * Returns: insertable (empty or tombstone) slot index
374      */
375     size_t prepareInsert(size_t hash, size_t target) nothrow
376     {
377         auto gi = growthInfo;
378         if (gi.hasNoTombstoneAndGrowthLeft()) {
379             if (gi.hasNoGrowthLeftAndNoTombstone()) {
380                 resize(nextCapacity(_capacity));
381                 target = findFirstNonFull(hash);
382             }
383         } else {
384             if (gi.growthLeft > 0)
385                 target = findFirstNonFull(hash);
386             else
387                 target = findInsertPositionWithGrowthOrRehash(hash);
388         }
389 
390         ++_size;
391         growthInfo.overwriteControlAsFull(_control[target]);
392         setCtrl(_control, _capacity, target, cast(Control)calcH2(hash));
393 
394         return target;
395     }
396 
397     size_t findInsertPositionWithGrowthOrRehash(size_t hash) nothrow
398     {
399         const cap = _capacity;
400 
        // abseil's heuristic: with load at or below 25/32, reclaim tombstones
        // in place instead of growing the table.
        if (cap > Group.Width && (_size * 32UL) <= (cap * 25UL))
            dropTombstonesWithoutResize();
403         else
404             resize(nextCapacity(cap));
405 
406         return findFirstNonFull(hash);
407     }
408 
    void dropTombstonesWithoutResize() nothrow
410     {
411         import core.stdc.string : memcpy;
412 
413         // See DropDeletesWithoutResize of abseil's raw_hash_set for the internal algorithm
414 
415         SlotType* slots = _slots.ptr;
416         Control* ctrl = _control;
417         const cap = _capacity;
418 
419         // Convert Tombstone to Empty and Full to Tombstone
420         for (Control* pos = ctrl; pos < ctrl + cap; pos += Group.Width)
421             Group(pos).convertSpecialToEmptyAndFullToTombstone(pos);
422         memcpy(ctrl + cap + 1, ctrl, numClonedBytes());
423         ctrl[cap] = Control.Sentinel;
424 
425         enum UnknownId = size_t.max;
426         size_t tmpId = UnknownId;
427         SlotType* slot = _slots.ptr;
428 
429         for (size_t i; i != cap; i++, slot++) {
430             if (ctrl[i].isEmpty) {
431                 tmpId = i;
432                 continue;
433             }
434             if (!ctrl[i].isTombstone)
435                 continue;
436 
437             const hash = rapidhashOf(mixin(HashKeyArg), _hashSeed);
438             const newI = findFirstNonFull(hash);
            const probeOffset = probe(cap, hash).offset;
            size_t probeIndex(size_t pos) {
                return ((pos - probeOffset) & cap) / Group.Width;
442             }
443 
444             if (probeIndex(newI) == probeIndex(i)) {
445                 setCtrl(ctrl, cap, i, cast(Control)calcH2(hash));
446                 continue;
447             }
448 
449             SlotType* newSlotPtr = slots + newI;
450             if (ctrl[newI].isEmpty) {
451                 setCtrl(ctrl, cap, newI, cast(Control)calcH2(hash));
452                 memcpy(newSlotPtr, slot, SlotType.sizeof);
453                 setCtrl(ctrl, cap, i, Control.Empty);
454                 tmpId = i;
455             } else {
456                 setCtrl(ctrl, cap, newI, cast(Control)calcH2(hash));
457 
458                 if (tmpId == UnknownId)
459                     tmpId = findEmptySlot(ctrl, i + 1, cap);
460 
461                 SlotType* tmpSlotPtr = slots + tmpId;
462 
463                 // Swap i and newI slot
464                 memcpy(tmpSlotPtr, newSlotPtr, SlotType.sizeof);
465                 memcpy(newSlotPtr, slot, SlotType.sizeof);
466                 memcpy(slot, tmpSlotPtr, SlotType.sizeof);
467 
                // repeat position i again
469                 i--;
470                 slot--;
471             }
472         }
473 
474         resetGrowthLeft();
475     }
476 
477     // Returns: slot index
478     size_t findEmptySlot(const Control* ctrl, size_t start, size_t end) nothrow
479     {
482         for (size_t i = start; i < end; i++) {
483             if (ctrl[i].isEmpty)
484                 return i;
485         }
486 
487         assert(false, "No empty slots. Caller must guarantee empty slot in given controls");
488         return size_t.max;
489     }
490 
    // Set ctrl[index] to hash, mirroring the write into the cloned control bytes
    // so wrap-around Group loads stay consistent.
492     static void setCtrl(Control* ctrl, size_t cap, size_t index, Control hash) nothrow
493     {
494         ctrl[index] = hash;
495         ctrl[((index - numClonedBytes()) & cap) + (numClonedBytes() & cap)] = hash; // Update mirror position together.
496     }
497 
498     static void resetCtrl(Control* ctrl, size_t cap) nothrow
499     {
500         import core.stdc.string : memset;
501 
502         memset(ctrl, cast(byte)Control.Empty, numControlBytes(cap));
503         ctrl[cap] = Control.Sentinel;
504     }
505 
506     /**
     * Probe the control bytes using a ProbeSeq derived from the given hash,
     * and return the offset of the first tombstone or empty slot.
509      *
510      * NOTE: If the entire table is full, the behavior is undefined.
511      */
512     size_t findFirstNonFull(size_t hash) nothrow
513     {
514         auto seq = probe(_capacity, hash);
515         auto offset = seq.offset;
516 
517         if (_control[offset].isEmptyOrTombstone)
518             return offset;
519 
520         while (true) {
521             auto g = Group(_control + seq.offset);
522             auto m = g.maskEmptyOrTombstone();
523             if (m)
524                 return seq.offset(m.lowestBitSet());
525 
526             seq.next();
527         }
528     }
529 
530     /**
     * Resize the internal table to the given capacity.
532      */
533     void resize(size_t newCap) nothrow
534     in
535     {
536         assert(isValidCapacity(newCap));
537     }
538     do
539     {
540         import core.stdc.string : memcpy;
541         import std.experimental.allocator : dispose;
542 
543         if (_capacity == 0) {
544             initTable(newCap);
545         } else {
            // initTable overwrites the following members with a newly allocated table, so save the old ones first.
547             auto oldCap = _capacity;
548             auto layout = TableLayout(oldCap, HeapAlignment);
549             auto oldCtrl = _control;
550             auto oldSlots = _slots.ptr;
551 
552             initTable(newCap);
553 
554             // Move data from old slots to new slots
555             void insertSlot(SlotType* slot) {
556                 size_t h = rapidhashOf(mixin(HashKeyArg), _hashSeed);
557                 size_t offset = findFirstNonFull(h);
558                 setCtrl(_control, _capacity, offset, cast(Control)calcH2(h));
559                 memcpy(_slots.ptr + offset, slot, SlotType.sizeof);
560             }
561             for (size_t i; i != oldCap; i++) {
562                 if (oldCtrl[i].isFull)
563                     insertSlot(oldSlots + i);
564             }
565 
            // Dispose of the old table
567             auto data = cast(ubyte[])(oldCtrl - layout.controlOffset)[0..layout.allocSize(SlotType.sizeof)];
568             static if (CallGCRange)
569                 GC.removeRange(data.ptr);
570             _allocator.dispose(data);
571         }
572     }
573 
574     /**
575      * Allocate and initialize a new table with new capacity. This also initializes control and growth info.
576      */
577     void initTable(size_t newCap) nothrow
578     in
579     {
580         assert(isValidCapacity(newCap));
581     }
582     do
583     {
584         import core.stdc.string : memset;
585 
586         auto layout = TableLayout(newCap, HeapAlignment);
587         auto size = layout.allocSize(SlotType.sizeof);
588         auto data = cast(ubyte[])_allocator.allocate(size);
589         static if (CallGCRange)
590             GC.addRange(data.ptr, size);
591 
592         // Initialize control and data slots
593         _capacity = newCap;
594         _control = cast(Control*)(data.ptr + layout.controlOffset());
595         _slots = cast(SlotType[])(data.ptr + layout.slotOffset())[0..size - layout.slotOffset()];
596         resetGrowthLeft();
597 
598         // Reset control array
599         resetCtrl(_control, _capacity);
600     }
601 
602     // Reset growth info with current capacity and size.
603     void resetGrowthLeft() nothrow @safe
604     {
605         growthInfo.initializeWith(capacityToGrowth(_capacity) - _size);
606     }
607 
    // Getter for the growth info stored immediately in front of the control bytes.
609     GrowthInfo* growthInfo() nothrow
610     {
611         return cast(GrowthInfo*)(_control) - 1;
612     }
613 }
614 
615 // TODO : Add SwissTable internal tests
616 
617 package:
618 
619 nothrow pure @safe @nogc
620 {
621 
622 bool isValidCapacity(size_t cap) { return ((cap + 1) & cap) == 0 && cap > 0; }
623 size_t numClonedBytes() { return Group.Width - 1; }
624 size_t numControlBytes(size_t cap) { return cap + 1 + numClonedBytes(); }
625 // Returns: the next valid capacity
626 size_t nextCapacity(size_t cap) { return cap * 2 + 1; }
// Returns: `num` converted to the next valid capacity (of the form 2^n - 1)
628 size_t normalizeCapacity(size_t num) { return num ? size_t.max >> countLeadingZeros(num) : 1; }
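
// Spot checks for the capacity helpers above: valid capacities have the form
// 2^n - 1, and nextCapacity preserves that invariant.
unittest
{
    assert(!isValidCapacity(0));
    assert(isValidCapacity(1));
    assert(!isValidCapacity(2));
    assert(isValidCapacity(3));
    assert(isValidCapacity(nextCapacity(3))); // 7
    assert(numControlBytes(7) == 7 + Group.Width);
}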
629 
630 unittest
631 {
632     // normalizeCapacity
633     assert(normalizeCapacity(0) == 1);
634     assert(normalizeCapacity(1) == 1);
635     assert(normalizeCapacity(2) == 3);
636     assert(normalizeCapacity(3) == 3);
637     assert(normalizeCapacity(4) == 7);
638     assert(normalizeCapacity(7) == 7);
639     assert(normalizeCapacity(15 + 1) == 15 * 2 + 1);
640     assert(normalizeCapacity(15 + 2) == 15 * 2 + 1);
641 }
642 
643 /*
 * Swiss Tables use 7/8 (0.875) as the max load factor. `capacityToGrowth` and `growthToLowerboundCapacity` follow it.
645  */
646 
647 // Calculate the number of available slots based on load factor.
648 size_t capacityToGrowth(size_t cap)
649 in
650 {
651     assert(isValidCapacity(cap));
652 }
653 do
654 {
655     if (Group.Width == 8 && cap == 7)
656         return 6;
657     return cap - (cap / 8);
658 }
659 
// This function doesn't guarantee the result is a valid capacity. Apply `normalizeCapacity` to the result.
size_t growthToLowerboundCapacity(size_t growth)
{
    if (Group.Width == 8 && growth == 7)
        return 8;

    // Use signed division so that growth == 0 yields 0 instead of underflowing.
    return growth + cast(size_t)((cast(ptrdiff_t)growth - 1) / 7);
}
668 
669 } // nothrow pure @safe @nogc
670 
671 unittest
672 {
673     // capacityToGrowth and growthToLowerboundCapacity
674     foreach (growth; 0..10000) {
675         size_t cap = normalizeCapacity(growthToLowerboundCapacity(growth));
676         assert(capacityToGrowth(cap) >= growth);
677         // For (capacity + 1) < Group.Width case, growth should equal capacity.
678         if (cap + 1 < Group.Width)
679             assert(capacityToGrowth(cap) == cap);
680         else
681             assert(capacityToGrowth(cap) < cap);
682         if (growth != 0 && cap > 1)
683             assert(capacityToGrowth(cap / 2) < growth); // There is no smaller capacity that works.
684     }
685 
686     for (size_t cap = Group.Width - 1; cap < 10000; cap = cap * 2 + 1) {
687         size_t growth = capacityToGrowth(cap);
688         assert(growth < cap);
689         assert(growthToLowerboundCapacity(growth) <= cap);
690         assert(normalizeCapacity(growthToLowerboundCapacity(growth)) == cap);
691     }
692 }
693 
694 /**
 * Stores the number of slots we can still fill without a rehash. The sign bit
 * (`TombstoneBit`) records whether the table contains any tombstone.
696  */
697 struct GrowthInfo
698 {
699     enum size_t GrowthLeftMask = size_t.max >> 1;
700     enum size_t TombstoneBit = ~GrowthLeftMask;
701 
702   private:
703     size_t _growthLeft;
704 
705   public nothrow @safe @nogc:
706     this(size_t gl) { _growthLeft = gl; }
707     void initializeWith(size_t gl) { _growthLeft = gl; }
708     void overwriteFullAsEmpty() { ++_growthLeft; }
709     void overwriteEmptyAsFull() { --_growthLeft; }
710     void overwriteManyEmptyAsFull(size_t count) { _growthLeft -= count; }
711     void overwriteControlAsFull(Control c) { _growthLeft -= cast(size_t)c.isEmpty; }
712     void overwriteFullAsTombstone() { _growthLeft |= TombstoneBit; }
713     bool hasNoTombstoneAndGrowthLeft() const { return cast(ptrdiff_t)_growthLeft > 0; }
714     bool hasNoGrowthLeftAndNoTombstone() const { return _growthLeft == 0; }
715     bool hasNoTombstone() const { return cast(ptrdiff_t)_growthLeft >= 0; }
716     size_t growthLeft() const { return _growthLeft & GrowthLeftMask; }
717 }
718 
719 unittest
720 {
721     // growthLeft
722     {
723         GrowthInfo gi;
724         gi.initializeWith(5);
725         assert(gi.growthLeft == 5);
726         gi.overwriteFullAsTombstone();
727         assert(gi.growthLeft == 5);
728     }
729     // hasNoTombstone
730     {
731         GrowthInfo gi;
732         gi.initializeWith(5);
733         assert(gi.hasNoTombstone());
734         gi.overwriteFullAsTombstone();
735         assert(!gi.hasNoTombstone());
        gi.initializeWith(5); // after re-initialization, no tombstones
737         assert(gi.hasNoTombstone());
738     }
739     // hasNoTombstoneAndGrowthLeft
740     {
741         GrowthInfo gi;
742         gi.initializeWith(5);
743         assert(gi.hasNoTombstoneAndGrowthLeft());
744         gi.overwriteFullAsTombstone();
745         assert(!gi.hasNoTombstoneAndGrowthLeft());
746         gi.initializeWith(0);
747         assert(!gi.hasNoTombstoneAndGrowthLeft());
748         gi.overwriteFullAsTombstone();
749         assert(!gi.hasNoTombstoneAndGrowthLeft());
        gi.initializeWith(5); // after re-initialization, no tombstones
751         assert(gi.hasNoTombstoneAndGrowthLeft);
752     }
753     // hasNoGrowthLeftAndNoTombstone
754     {
755         GrowthInfo gi;
756         gi.initializeWith(1);
757         assert(!gi.hasNoGrowthLeftAndNoTombstone());
758         gi.overwriteEmptyAsFull();
759         assert(gi.hasNoGrowthLeftAndNoTombstone());
760         gi.overwriteFullAsTombstone();
761         assert(!gi.hasNoGrowthLeftAndNoTombstone());
762         gi.overwriteFullAsEmpty();
763         assert(!gi.hasNoGrowthLeftAndNoTombstone());
764         gi.initializeWith(0);
765         assert(gi.hasNoGrowthLeftAndNoTombstone());
766         gi.overwriteFullAsEmpty();
767         assert(!gi.hasNoGrowthLeftAndNoTombstone());
768     }
769     // overwriteFullAsEmpty
770     {
771         GrowthInfo gi;
772         gi.initializeWith(5);
773         gi.overwriteFullAsEmpty();
774         assert(gi.growthLeft == 6);
775         gi.overwriteFullAsTombstone();
776         assert(gi.growthLeft == 6);
777         gi.overwriteFullAsEmpty();
778         assert(gi.growthLeft == 7);
779         assert(!gi.hasNoTombstone());
780     }
781     // overwriteEmptyAsFull
782     {
783         GrowthInfo gi;
784         gi.initializeWith(5);
785         gi.overwriteEmptyAsFull();
786         assert(gi.growthLeft == 4);
787         gi.overwriteFullAsTombstone();
788         assert(gi.growthLeft == 4);
789         gi.overwriteEmptyAsFull();
790         assert(gi.growthLeft == 3);
791         assert(!gi.hasNoTombstone());
792     }
793     // overwriteControlAsFull
794     {
795         GrowthInfo gi;
796         gi.initializeWith(5);
797         gi.overwriteControlAsFull(Control.Empty);
798         assert(gi.growthLeft == 4);
799         gi.overwriteControlAsFull(Control.Tombstone);
800         assert(gi.growthLeft == 4);
801         gi.overwriteFullAsTombstone();
802         gi.overwriteControlAsFull(Control.Tombstone);
803         assert(!gi.hasNoTombstoneAndGrowthLeft());
804         assert(!gi.hasNoTombstone());
805     }
806 }
807 
808 /**
 * Triangular probe sequence.
 *
 * The i-th probe visits offset `(h1 + Group.Width * i * (i + 1) / 2) & mask`.
 * Because the number of slots is a power of two, the sequence visits every
 * group exactly once per cycle. The stride is a multiple of `Group.Width`,
 * so probing steps don't overlap groups.
812  */
813 struct ProbeSeq
814 {
815   private:
816     size_t _index, _mask, _offset;
817 
818   public nothrow @safe:
819     this(H1 h, size_t m)
820     {
821         _mask = m;
822         _offset = h & m;
823     }
824 
825     size_t index() const { return _index; }
826     size_t offset() const { return _offset; }
827     size_t offset(size_t i) const { return (_offset + i) & _mask; }
828 
829     void next()
830     {
831         _index += Group.Width;
832         _offset += _index;
833         _offset &= _mask;
834     }
835 }
836 
837 /// Construct ProbeSeq with calculated H1 hash and capacity
838 ProbeSeq probe(size_t capacity, size_t hash) nothrow @safe
839 {
840     return ProbeSeq(calcH1(hash), capacity);
841 }
842 
843 unittest
844 {
845     // ProbeSeq functions
846     import std.algorithm : equal;
847     import std.range : generate, take;
848 
849     ProbeSeq seq = ProbeSeq(0, 127);
850     size_t genOffset() {
851         size_t res = seq.offset;
852         seq.next();
853         return res;
854     }
855 
856     static if (Group.Width == 16)
857         int[] ans = [0, 16, 48, 96, 32, 112, 80, 64];
858     else
859         int[] ans = [0, 8, 24, 48, 80, 120, 40, 96];
860     
861     assert(equal(generate(&genOffset).take(8), ans));
862     seq = ProbeSeq(128, 127);
863     assert(equal(generate(&genOffset).take(8), ans));
864 }