/home/uke/oil/mycpp/gc_dict.h
Line | Count | Source |
1 | | // Hash table based on CPython's "compact dict": |
2 | | // |
3 | | // https://mail.python.org/pipermail/python-dev/2012-December/123028.html |
4 | | // https://code.activestate.com/recipes/578375/ |
5 | | // |
6 | | // Main differences: |
7 | | // - It's type-specialized in C++ -- Dict<K, V>. |
8 | | // - It's integrated with our mark and sweep GC, using Slab<int>, Slab<K>, and |
9 | | // Slab<V> |
10 | | // - We use linear probing, not the pseudo-random number generator |
11 | | |
12 | | #ifndef MYCPP_GC_DICT_H |
13 | | #define MYCPP_GC_DICT_H |
14 | | |
15 | | #include "mycpp/comparators.h" |
16 | | #include "mycpp/gc_builtins.h" |
17 | | #include "mycpp/gc_list.h" |
18 | | #include "mycpp/hash.h" |
19 | | |
20 | | // Non-negative entries in index_ are array indices into keys_ and values_. |
21 | | // There are two special negative entries: |
22 | | |
23 | | // index_ value to say this Dict item was deleted (a tombstone). |
24 | | const int kDeletedEntry = -1; |
25 | | |
26 | | // index_ value to say this Dict entry is free. |
27 | | const int kEmptyEntry = -2; |
28 | | |
29 | | // Return value for find_kv_index(), not stored in index_. |
30 | | const int kNotFound = -3; |
31 | | |
32 | | // Return value for hash_and_probe(), not stored in index_. |
33 | | const int kTooSmall = -4; |
34 | | |
35 | | // Helper for keys() and values() |
36 | | template <typename T> |
37 | | List<T>* ListFromDictSlab(Slab<T>* slab, int n) { |
38 | | List<T>* result = Alloc<List<T>>(); |
39 | | result->reserve(n); |
40 | | |
41 | | for (int i = 0; i < n; ++i) { |
42 | | result->append(slab->items_[i]); |
43 | | } |
44 | | return result; |
45 | | } |
46 | | |
47 | | // GlobalDict is layout-compatible with Dict (unit tests assert this), and it |
48 | | // can be a true C global (incurs zero startup time) |
49 | | |
50 | | template <typename K, typename V, int N> |
51 | | class GlobalDict { |
52 | | public: |
53 | | int len_; |
54 | | int capacity_; |
55 | | int index_len_; |
56 | | GlobalSlab<int, N>* index_; |
57 | | GlobalSlab<K, N>* keys_; |
58 | | GlobalSlab<V, N>* values_; |
59 | | }; |
60 | | |
61 | | #define GLOBAL_DICT(name, K, V, N, keys, vals) \ |
62 | | GcGlobal<GlobalSlab<K, N>> _keys_##name = {ObjHeader::Global(TypeTag::Slab), \ |
63 | | {.items_ = keys}}; \ |
64 | | GcGlobal<GlobalSlab<V, N>> _vals_##name = {ObjHeader::Global(TypeTag::Slab), \ |
65 | | {.items_ = vals}}; \ |
66 | | GcGlobal<GlobalDict<K, V, N>> _dict_##name = { \ |
67 | | ObjHeader::Global(TypeTag::Dict), \ |
68 | | {.len_ = N, \ |
69 | | .capacity_ = N, \ |
70 | | .index_len_ = 0, \ |
71 | | .index_ = nullptr, \ |
72 | | .keys_ = &_keys_##name.obj, \ |
73 | | .values_ = &_vals_##name.obj}, \ |
74 | | }; \ |
75 | | Dict<K, V>* name = reinterpret_cast<Dict<K, V>*>(&_dict_##name.obj); |
76 | | |
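A hedged usage sketch of GLOBAL_DICT; the name and values are hypothetical. With N == 1 the braced initializer lists contain no commas, so they pass through the macro unchanged:

// Hypothetical global mapping {42: 43}, usable before main() runs:
GLOBAL_DICT(gAnswer, int, int, 1, {42}, {43});

Because index_ is nullptr for such a dict, lookups fall back to the linear search in find_kv_index() further down.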
77 | | template <class K, class V> |
78 | | class Dict { |
79 | | public: |
80 | | Dict() |
81 | | : len_(0), |
82 | | capacity_(0), |
83 | | index_len_(0), |
84 | | index_(nullptr), |
85 | | keys_(nullptr), |
86 | 10 | values_(nullptr) { |
87 | 10 | }
88 | | |
89 | | Dict(std::initializer_list<K> keys, std::initializer_list<V> values) |
90 | | : len_(0), |
91 | | capacity_(0), |
92 | | index_len_(0), |
93 | | index_(nullptr), |
94 | | keys_(nullptr), |
95 | | values_(nullptr) { |
96 | | DCHECK(keys.size() == values.size()); |
97 | | auto v = values.begin(); // This simulates a "zip" loop |
98 | | for (auto key : keys) { |
99 | | // note: calls reserve(), and may allocate |
100 | | this->set(key, *v); |
101 | | ++v; |
102 | | } |
103 | | } |
104 | | |
105 | | // Reserve enough space for at LEAST this many key-value pairs. |
106 | | void reserve(int num_desired); |
107 | | |
108 | | // d[key] in Python: raises KeyError if not found |
109 | | V at(K key) const; |
110 | | |
111 | | // d.get(key) in Python. (Can't use this if V isn't a pointer!) |
112 | | V get(K key) const; |
113 | | |
114 | | // Get a key, but return a default if not found. |
115 | | // expr_parse.py uses this with OTHER_BALANCE |
116 | | V get(K key, V default_val) const; |
117 | | |
118 | | // Implements d[k] = v. May resize the dictionary. |
119 | | void set(K key, V val); |
120 | | |
121 | | void update(List<Tuple2<K, V>*>* pairs); |
122 | | void update(Dict<K, V>* other); |
123 | | |
124 | | List<K>* keys() const; |
125 | | |
126 | | List<V>* values() const; |
127 | | |
128 | | void clear(); |
129 | | |
130 | | // Helper used by find_kv_index(), set(), mylib::dict_erase() in |
131 | | // gc_mylib.h |
132 | | // Returns either: |
133 | | // - the slot for an existing key, or an empty slot for a new key |
134 | | // - kTooSmall if the table is full |
135 | | int hash_and_probe(K key) const; |
136 | | |
137 | | // Helper used by at(), get(), dict_contains() |
138 | | // Given a key, returns either: |
139 | | // - an index into keys_ and values_ |
140 | | // - kNotFound |
141 | | int find_kv_index(K key) const; |
142 | | |
143 | 10 | static constexpr ObjHeader obj_header() { |
144 | 10 | return ObjHeader::ClassFixed(field_mask(), sizeof(Dict)); |
145 | 10 | }
146 | | |
147 | | int len_; // number of entries (keys and values, almost dense) |
148 | | int capacity_; // number of k/v slots |
149 | | int index_len_; // number of index slots |
150 | | |
151 | | // These 3 slabs are resized at the same time. |
152 | | Slab<int>* index_; // kEmptyEntry, kDeletedEntry, or a valid index into |
153 | | // keys_ and values_ |
154 | | Slab<K>* keys_; // Dict<K, int> |
155 | | Slab<V>* values_; // Dict<int, V> |
156 | | |
157 | | // A dict has 3 pointers the GC needs to follow. |
158 | 10 | static constexpr uint32_t field_mask() { |
159 | 10 | return maskbit(offsetof(Dict, index_)) | maskbit(offsetof(Dict, keys_)) | |
160 | 10 | maskbit(offsetof(Dict, values_)); |
161 | 10 | }
162 | | |
163 | | DISALLOW_COPY_AND_ASSIGN(Dict); |
164 | | |
165 | | // kItemSize is max of K and V size. That is, on 64-bit machines, the RARE |
166 | | // Dict<int, int> is smaller than other dicts |
167 | | static constexpr int kItemSize = sizeof(K) > sizeof(V) ? sizeof(K) |
168 | | : sizeof(V); |
169 | | |
170 | | // Matches mark_sweep_heap.h |
171 | | static constexpr int kPoolBytes2 = 48 - sizeof(ObjHeader); |
172 | | static_assert(kPoolBytes2 % kItemSize == 0, |
173 | | "An integral number of items should fit in second pool"); |
174 | | static constexpr int kNumItems2 = kPoolBytes2 / kItemSize; |
175 | | |
176 | | static const int kHeaderFudge = sizeof(ObjHeader) / kItemSize; |
177 | | static_assert(sizeof(ObjHeader) % kItemSize == 0, |
178 | | "Slab header size should be multiple of key size"); |
179 | | |
180 | | #if 0 |
181 | | static constexpr int kMinBytes2 = 128 - sizeof(ObjHeader); |
182 | | static_assert(kMinBytes2 % kItemSize == 0, |
183 | | "An integral number of items should fit"); |
184 | | static constexpr int kMinItems2 = kMinBytes2 / kItemSize; |
185 | | #endif |
186 | | |
187 | 18 | int HowManyPairs(int num_desired) { |
188 | | // See gc_list.h for comments on nearly identical logic |
189 | | |
190 | 18 | if (num_desired <= kNumItems2) { // use full cell in pool 2 |
191 | 6 | return kNumItems2; |
192 | 6 | } |
193 | | #if 0 |
194 | | if (num_desired <= kMinItems2) { // 48 -> 128, not 48 -> 64 |
195 | | return kMinItems2; |
196 | | } |
197 | | #endif |
198 | 12 | return RoundUp(num_desired + kHeaderFudge) - kHeaderFudge; |
199 | 18 | }
200 | | }; |
201 | | |
202 | | template <typename K, typename V> |
203 | 0 | inline bool dict_contains(const Dict<K, V>* haystack, K needle) { |
204 | 0 | return haystack->find_kv_index(needle) != kNotFound; |
205 | 0 | }
206 | | |
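A minimal usage sketch of the API declared above, not part of this header. It assumes the mycpp runtime is initialized and that StrFromC() from the string runtime is available; the function name is invented for illustration:

// Build a small dict and query it.
Dict<BigStr*, int>* MakeCounts() {
  auto d = Alloc<Dict<BigStr*, int>>();
  d->set(StrFromC("spam"), 1);            // may call reserve() and allocate
  d->set(StrFromC("eggs"), 2);
  int a = d->at(StrFromC("spam"));        // 1; at() raises KeyError if missing
  int b = d->get(StrFromC("ham"), 0);     // 0, the default for a missing key
  bool has = dict_contains(d, StrFromC("eggs"));  // true
  (void)a; (void)b; (void)has;
  return d;                               // d->len_ == 2
}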
207 | | template <typename K, typename V> |
208 | 18 | void Dict<K, V>::reserve(int num_desired) { |
209 | 18 | if (capacity_ >= num_desired) { |
210 | 0 | return; // Don't do anything if there's already enough space. |
211 | 0 | } |
212 | | |
213 | 18 | int old_len = len_; |
214 | 18 | Slab<K>* old_k = keys_; |
215 | 18 | Slab<V>* old_v = values_; |
216 | | |
217 | | // Calculate the number of keys and values we should have |
218 | 18 | capacity_ = HowManyPairs(num_desired); |
219 | | |
220 | | // 1) Ensure the index length is a power of 2, to avoid an expensive modulus (%) operation
221 | | // 2) Introduce hash table load factor. Use capacity_+1 to simulate ceil() |
222 | | // div, not floor() div. |
223 | 18 | index_len_ = RoundUp((capacity_ + 1) * 5 / 4); |
224 | 18 | DCHECK(index_len_ > capacity_); |
225 | | |
226 | 0 | index_ = NewSlab<int>(index_len_); |
227 | 434 | for (int i = 0; i < index_len_; ++i) { |
228 | 416 | index_->items_[i] = kEmptyEntry; |
229 | 416 | } |
230 | | |
231 | | // These are DENSE, while index_ is sparse. |
232 | 18 | keys_ = NewSlab<K>(capacity_); |
233 | 18 | values_ = NewSlab<V>(capacity_); |
234 | | |
235 | 18 | if (old_k != nullptr) { // rehash if there were any entries |
236 | | // log("REHASH num_desired %d", num_desired); |
237 | 12 | len_ = 0; |
238 | 102 | for (int i = 0; i < old_len; ++i) { |
239 | 90 | set(old_k->items_[i], old_v->items_[i]); |
240 | 90 | } |
241 | 12 | } |
242 | 18 | }
243 | | |
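A worked example of the sizing math in reserve() and HowManyPairs(), under assumptions that are not stated in this header but are plausible for pointer-sized K and V on 64-bit builds: sizeof(ObjHeader) == 8, so kItemSize == 8, kHeaderFudge == 1, kNumItems2 == (48 - 8) / 8 == 5, and RoundUp() rounds up to the next power of two:

// reserve(1):  capacity_  = HowManyPairs(1)  = 5                  (pool-2 cell)
//              index_len_ = RoundUp((5 + 1) * 5 / 4)  = RoundUp(7)  = 8
// reserve(6):  capacity_  = HowManyPairs(6)  = RoundUp(7)  - 1  = 7
//              index_len_ = RoundUp((7 + 1) * 5 / 4)  = RoundUp(10) = 16
// reserve(16): capacity_  = HowManyPairs(16) = RoundUp(17) - 1  = 31
//              index_len_ = RoundUp((31 + 1) * 5 / 4) = RoundUp(40) = 64
//
// The 5/4 factor keeps at least ~20% of index_ slots empty even when the k/v
// slabs are full (more after rounding up to a power of 2), so probe chains
// stay short and index_len_ > capacity_ always holds, as the DCHECK asserts.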
244 | | template <typename K, typename V> |
245 | 0 | V Dict<K, V>::at(K key) const { |
246 | 0 | int kv_index = find_kv_index(key); |
247 | 0 | if (kv_index == kNotFound) { |
248 | 0 | throw Alloc<KeyError>(); |
249 | 0 | } else { |
250 | 0 | return values_->items_[kv_index]; |
251 | 0 | } |
252 | 0 | }
253 | | |
254 | | template <typename K, typename V> |
255 | 1 | V Dict<K, V>::get(K key) const { |
256 | 1 | int kv_index = find_kv_index(key); |
257 | 1 | if (kv_index == kNotFound) { |
258 | 0 | return nullptr; |
259 | 1 | } else { |
260 | 1 | return values_->items_[kv_index]; |
261 | 1 | } |
262 | 1 | }
263 | | |
264 | | template <typename K, typename V> |
265 | | V Dict<K, V>::get(K key, V default_val) const { |
266 | | int kv_index = find_kv_index(key); |
267 | | if (kv_index == kNotFound) { |
268 | | return default_val; |
269 | | } else { |
270 | | return values_->items_[kv_index]; |
271 | | } |
272 | | } |
273 | | |
274 | | template <typename K, typename V> |
275 | | List<K>* Dict<K, V>::keys() const { |
276 | | return ListFromDictSlab<K>(keys_, len_); |
277 | | } |
278 | | |
279 | | template <typename K, typename V> |
280 | | List<V>* Dict<K, V>::values() const { |
281 | | return ListFromDictSlab<V>(values_, len_); |
282 | | } |
283 | | |
284 | | template <typename K, typename V> |
285 | | void Dict<K, V>::clear() { |
286 | | // Maintain invariant |
287 | | for (int i = 0; i < index_len_; ++i) { |
288 | | index_->items_[i] = kEmptyEntry; |
289 | | } |
290 | | |
291 | | if (keys_) { |
292 | | memset(keys_->items_, 0, len_ * sizeof(K)); // zero for GC scan |
293 | | } |
294 | | if (values_) { |
295 | | memset(values_->items_, 0, len_ * sizeof(V)); // zero for GC scan |
296 | | } |
297 | | len_ = 0; |
298 | | } |
299 | | |
300 | | // TODO: |
301 | | // - Special case to intern BigStr* when it's hashed? How? |
302 | | // - Should we have wrappers like: |
303 | | // - V GetAndIntern<V>(D, &string_key) |
304 | | // - SetAndIntern<V>(D, &string_key, value) |
305 | | // This will enable duplicate copies of the string to be garbage collected |
306 | | template <typename K, typename V> |
307 | 186 | int Dict<K, V>::hash_and_probe(K key) const { |
308 | 186 | if (capacity_ == 0) { |
309 | 6 | return kTooSmall; |
310 | 6 | } |
311 | | |
312 | | // Hash the key onto a slot in the index. If the first slot is occupied, |
313 | | // probe until an empty one is found. |
314 | 180 | unsigned h = hash_key(key); |
315 | | // faster % using & -- assuming index_len_ is power of 2 |
316 | 180 | int init_bucket = h & (index_len_ - 1); |
317 | | |
318 | | // If we see a tombstone along the probing path, stash it. |
319 | 180 | int open_slot = -1; |
320 | | |
321 | 244 | for (int i = 0; i < index_len_; ++i) { |
322 | | // Start at init_bucket and wrap around
323 | | |
324 | | // faster % using & -- assuming index_len_ is power of 2 |
325 | 244 | int slot = (i + init_bucket) & (index_len_ - 1); |
326 | | |
327 | 244 | int kv_index = index_->items_[slot]; |
328 | 244 | DCHECK(kv_index < len_); |
329 | | // Optimistically, this is the common case once the table has been populated.
330 | 244 | if (kv_index >= 0) { |
331 | 65 | unsigned h2 = hash_key(keys_->items_[kv_index]); |
332 | 65 | if (h == h2 && keys_equal(keys_->items_[kv_index], key)) { |
333 | 1 | return slot; |
334 | 1 | } |
335 | 65 | } |
336 | | |
337 | 243 | if (kv_index == kEmptyEntry) { |
338 | 179 | if (open_slot != -1) { |
339 | 0 | slot = open_slot; |
340 | 0 | } |
341 | | // If there isn't room in the entry arrays, tell the caller to resize. |
342 | 179 | return len_ < capacity_ ? slot : kTooSmall; |
343 | 179 | } |
344 | | |
345 | | // Tombstone or collided keys unequal. Keep scanning. |
346 | 64 | DCHECK(kv_index >= 0 || kv_index == kDeletedEntry); |
347 | 64 | if (kv_index == kDeletedEntry && open_slot == -1) { |
348 | | // NOTE: We only record the open slot here. We DON'T return it. If we're |
349 | | // looking for a key that was writen before this tombstone was written to |
350 | | // the index we should continue probing until we get to that key. If we |
351 | | // get to an empty index slot or the end of the index then we know we are |
352 | | // dealing with a new key and can safely replace the tombstone without |
353 | | // disrupting any existing keys. |
354 | 0 | open_slot = slot; |
355 | 0 | } |
356 | 64 | } |
357 | | |
358 | 0 | if (open_slot != -1) { |
359 | 0 | return len_ < capacity_ ? open_slot : kTooSmall; |
360 | 0 | } |
361 | | |
362 | 0 | return kTooSmall; |
363 | 0 | }
364 | | |
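A walkthrough of the probing logic above, with made-up hash values and index_len_ == 8; assume len_ < capacity_ so there is room for a new key:

// Probing for a NEW key whose init_bucket is 5:
//   slot 5: kDeletedEntry                      -> remember open_slot = 5, keep scanning
//   slot 6: kv_index 3, but the stored key differs (hash or keys_equal mismatch) -> keep scanning
//   slot 7: kEmptyEntry                        -> the key is new; reuse the tombstone: return 5
//
// Probing for an EXISTING key follows the same walk but returns as soon as a
// slot's stored key compares equal, so a tombstone earlier on the path never
// hides a key that was inserted before the deletion.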
365 | | template <typename K, typename V> |
366 | 1 | int Dict<K, V>::find_kv_index(K key) const { |
367 | 1 | if (index_ != nullptr) { // Common case |
368 | 1 | int pos = hash_and_probe(key); |
369 | 1 | if (pos == kTooSmall) { |
370 | 0 | return kNotFound; |
371 | 0 | } |
372 | 1 | DCHECK(pos >= 0); |
373 | 0 | int kv_index = index_->items_[pos]; |
374 | 1 | if (kv_index < 0) { |
375 | 0 | return kNotFound; |
376 | 0 | } |
377 | 1 | return kv_index; |
378 | 1 | } |
379 | | |
380 | | // Linear search on GlobalDict instances. |
381 | | // TODO: Should we populate and compare their hash values? |
382 | 0 | for (int i = 0; i < len_; ++i) { |
383 | 0 | if (keys_equal(keys_->items_[i], key)) { |
384 | 0 | return i; |
385 | 0 | } |
386 | 0 | } |
387 | | |
388 | 0 | return kNotFound; |
389 | 0 | }
390 | | |
391 | | template <typename K, typename V> |
392 | 167 | void Dict<K, V>::set(K key, V val) { |
393 | 167 | DCHECK(obj_header().heap_tag != HeapTag::Global); |
394 | 0 | int pos = hash_and_probe(key); |
395 | 167 | if (pos == kTooSmall) { |
396 | 18 | reserve(len_ + 1); |
397 | 18 | pos = hash_and_probe(key); |
398 | 18 | } |
399 | 167 | DCHECK(pos >= 0); |
400 | | |
401 | 0 | int kv_index = index_->items_[pos]; |
402 | 167 | DCHECK(kv_index < len_); |
403 | 167 | if (kv_index < 0) { |
404 | | // Write new entries to the end of the k/v arrays. This allows us to recall |
405 | | // insertion order until the first deletion. |
406 | 167 | keys_->items_[len_] = key; |
407 | 167 | values_->items_[len_] = val; |
408 | 167 | index_->items_[pos] = len_; |
409 | 167 | len_++; |
410 | 167 | DCHECK(len_ <= capacity_); |
411 | 167 | } else { |
412 | 0 | values_->items_[kv_index] = val; |
413 | 0 | } |
414 | 167 | }
415 | | |
416 | | template <typename K, typename V> |
417 | 0 | inline int len(const Dict<K, V>* d) { |
418 | 0 | return d->len_; |
419 | 0 | }
420 | | |
421 | | template <class K, class V> |
422 | | class DictIter { |
423 | | public: |
424 | 0 | explicit DictIter(Dict<K, V>* D) : D_(D), pos_(0) { |
425 | 0 | }
426 | 0 | void Next() { |
427 | 0 | pos_++; |
428 | 0 | }
429 | 0 | bool Done() { |
430 | 0 | return pos_ == D_->len_; |
431 | 0 | }
432 | 0 | K Key() { |
433 | 0 | return D_->keys_->items_[pos_]; |
434 | 0 | }
435 | 0 | V Value() { |
436 | 0 | return D_->values_->items_[pos_]; |
437 | 0 | }
438 | | |
439 | | private: |
440 | | const Dict<K, V>* D_; |
441 | | int pos_; |
442 | | }; |
443 | | |
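A sketch of iterating with DictIter, mirroring the loop used by update() further down; the function name is hypothetical:

// Sum the values of a Dict<BigStr*, int>, visiting entries in insertion order
// (insertion order is preserved until the first deletion, per the comment in set()).
inline int SumValues(Dict<BigStr*, int>* d) {
  int sum = 0;
  for (DictIter<BigStr*, int> it(d); !it.Done(); it.Next()) {
    BigStr* key = it.Key();  // unused here, but shows the key accessor
    (void)key;
    sum += it.Value();
  }
  return sum;
}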
444 | | // dict(mylist) converts a list of (k, v) tuples into a dict |
445 | | template <typename K, typename V> |
446 | | Dict<K, V>* dict(List<Tuple2<K, V>*>* l) { |
447 | | auto ret = Alloc<Dict<K, V>>(); |
448 | | ret->reserve(len(l)); |
449 | | for (ListIter<Tuple2<K, V>*> it(l); !it.Done(); it.Next()) { |
450 | | ret->set(it.Value()->at0(), it.Value()->at1()); |
451 | | } |
452 | | return ret; |
453 | | } |
454 | | |
455 | | template <class K, class V> |
456 | | void Dict<K, V>::update(List<Tuple2<K, V>*>* pairs) { |
457 | | reserve(len_ + len(pairs)); |
458 | | for (ListIter<Tuple2<K, V>*> it(pairs); !it.Done(); it.Next()) { |
459 | | set(it.Value()->at0(), it.Value()->at1()); |
460 | | } |
461 | | } |
462 | | |
463 | | template <class K, class V> |
464 | | void Dict<K, V>::update(Dict<K, V>* other) { |
465 | | reserve(len_ + len(other)); |
466 | | for (DictIter<K, V> it(other); !it.Done(); it.Next()) { |
467 | | set(it.Key(), it.Value()); |
468 | | } |
469 | | } |
470 | | |
471 | | #endif // MYCPP_GC_DICT_H |