/home/uke/oil/mycpp/gc_heap.h
Line | Count | Source |
1 | | // gc_heap.h |
2 | | // |
3 | | // A garbage collected heap that looks like statically typed Python: Str, |
4 | | // List<T>, Dict<K, V>. |
5 | | |
6 | | #ifndef GC_HEAP_H |
7 | | #define GC_HEAP_H |
8 | | |
9 | | #include <cassert> // assert() |
10 | | #include <cstddef> // max_align_t |
11 | | #include <cstdint> // uint8_t, uint16_t, uint32_t
12 | | #include <cstdlib> // malloc |
13 | | #include <cstring> // memcpy |
14 | | #include <initializer_list> |
15 | | #include <new> // placement new |
16 | | #include <utility> // std::forward |
17 | | |
18 | | #include "common.h" |
19 | | |
20 | | // Design Notes: |
21 | | |
22 | | // It's a semi-space collector using the Cheney algorithm. (Later we may add a |
23 | | // "large object space", managed by mark-and-sweep after each copy step.) |
24 | | |
25 | | // Influences / Prior art: |
26 | | // |
27 | | // - OCaml / ZINC machine - statically typed, everything is a pointer (no value |
28 | | // types). |
29 | | // - femtolisp - Cheney collector; used to bootstrap Julia.
30 | | // - Other: ES shell, Lua, Python and especially Python dicts. |
31 | | |
32 | | // Design: |
33 | | // |
34 | | // - Graph of application types: |
35 | | // - Str* |
36 | | // - List<T>* |
37 | | // - Dict<K, V>* |
38 | | // - User-defined classes, which may have a vtable pointer. |
39 | | // - Graph of GC nodes: everything is an Obj* with an 8 byte header |
40 | | // - 1 byte heap tag, for Cheney |
41 | | // - 1 byte type tag for Zephyr ASDL unions |
42 | | // - 2 byte / 16-bit field bit mask for following pointers on user-defined |
43 | | // classes and List / Dict "headers" |
44 | | // - 4 bytes length for Cheney to determine how much to copy. |
45 | | // |
46 | | // Operations that resize: |
47 | | // |
48 | | // - List::append() and extend() can realloc |
49 | | // - Dict::set() can realloc and rehash (TODO) |
50 | | // |
51 | | // Important Types: |
52 | | // |
53 | | // - Slab<T> to trace the object graph. Slab<int> is opaque, but Slab<T*> |
54 | | // requires tracing. |
55 | | // - A List<T> is a fixed-size structure, with int fields and a pointer |
56 | | // to a single Slab<T> (the items). |
57 | | // - A Dict<K, V> is also fixed-size, with pointers to 3 slabs: the index |
58 | | // Slab<int>, the keys Slab<K>, and the values Slab<V>.
59 | | // |
60 | | // "Ghost" layout types: |
61 | | // |
62 | | // - LayoutFixed - for walking up to 16 fields of a user-defined type. |
63 | | // - LayoutForwarded - for storing the forwarding pointer that the Cheney |
64 | | // algorithm uses. |
65 | | // |
66 | | // Stack rooting API: |
67 | | // |
68 | | // StackRoots _roots({&mystr, &mydict, &mylist}); |
69 | | // |
70 | | // This pushes local variables onto the global data structure managed by the |
71 | | // GC. |
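//
// A minimal usage sketch (the local variable names are hypothetical; CopyStr()
// and Alloc() are defined below). Any pointer that must survive an allocation
// is rooted first, because every allocation may trigger a collection:
//
//   Str* s = nullptr;
//   List<Str*>* out = nullptr;
//   StackRoots _roots({&s, &out});
//
//   s = CopyStr("foo");          // may collect
//   out = Alloc<List<Str*>>();   // may collect; 's' is updated if it moved
//   out->append(s);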
72 | | |
73 | | // TODO: Dicts should actually use hashing! Test computational complexity. |
74 | | |
75 | | // Memory allocation APIs: |
76 | | // |
77 | | // - gc_heap::Alloc<Foo>(x) |
78 | | // The typed public API. An alternative to new Foo(x). mycpp/ASDL should |
79 | | // generate these calls. |
80 | | // - AllocStr(length), CopyStr(), NewList, NewDict: gc_heap::Alloc() doesn't
81 | | // work for these types, e.g. because Str uses the "flexible array"
82 | | // pattern (see the sketch after this list)
83 | | // - Heap::Allocate() |
84 | | // The untyped internal API. For AllocStr() and NewSlab(). |
85 | | // - malloc() -- for say yajl to use. Manually deallocated. |
86 | | // - new/delete -- shouldn't be in Oil? |
87 | | |
88 | | // Slab Sizing with 8-byte slab header |
89 | | // |
90 | | // 16 - 8 = 8 = 1 eight-byte or 2 four-byte elements |
91 | | // 32 - 8 = 24 = 3 eight-byte or 6 four-byte elements |
92 | | // 64 - 8 = 56 = 7 eight-byte or 14 four-byte elements |
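//
// Worked example: a Slab<int> holding 3 items needs RoundUp(8 + 3 * 4) = 32
// bytes, which leaves room for (32 - 8) / 4 = 6 four-byte elements.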
93 | | |
94 | | // #defines for debugging:
95 | | // |
96 | | // GC_EVERY_ALLOC: Collect() on every Allocate(). Exposes many bugs! |
97 | | // GC_PROTECT: Use mprotect() |
98 | | // GC_VERBOSE: Log when we collect |
99 | | // GC_DEBUG: Collect more stats. TODO: Rename this? |
100 | | |
101 | | // Obj::heap_tag_ values. They're odd numbers to distinguish them from vtable |
102 | | // pointers. |
103 | | namespace Tag { |
104 | | const int Forwarded = 1; // For the Cheney algorithm. |
105 | | const int Global = 3; // Neither copy nor scan. |
106 | | const int Opaque = 5; // Copy but don't scan. List<int> and Str |
107 | | const int FixedSize = 7; // Fixed size headers: consult field_mask_ |
108 | | const int Scanned = 9; // Copy AND scan for non-NULL pointers. |
109 | | } // namespace Tag |
110 | | |
111 | | // Silly definition for passing types like GlobalList<T, N> and initializer |
112 | | // lists like {1, 2, 3} to macros |
113 | | |
114 | | #define COMMA , |
115 | | |
116 | | namespace gc_heap { |
117 | | |
118 | | template <class T> |
119 | | class List; |
120 | | |
121 | | constexpr int kMask = alignof(max_align_t) - 1; // e.g. 15 or 7 |
122 | | |
123 | | // Align returned pointers to the worst-case alignment, alignof(max_align_t) (e.g. 16 or 8 bytes)
124 | 4.34k | inline size_t aligned(size_t n) { |
125 | | // https://stackoverflow.com/questions/2022179/c-quick-calculation-of-next-multiple-of-4 |
126 | | // return (n + 7) & ~7; |
127 | | |
128 | 4.34k | return (n + kMask) & ~kMask; |
129 | 4.34k | } |
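// For example, with kMask == 15, aligned(1) == 16 and aligned(24) == 32; with
// kMask == 7, aligned(1) == 8.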
130 | | |
131 | | class Obj; |
132 | | |
133 | | const int kMaxRoots = 4 * 1024; // related to C stack size |
134 | | |
135 | | // #define GC_DEBUG 1 |
136 | | |
137 | | class Space { |
138 | | public: |
139 | 55 | Space() { |
140 | 55 | } |
141 | | void Init(int space_size); |
142 | | |
143 | | void Free(); |
144 | | |
145 | 448 | void Clear() { |
146 | | // Slab scanning relies on 0 bytes (nullptr). e.g. for a List<Token*>*. |
147 | | // Note: I noticed that memset() of say 400 MiB is pretty expensive. Does |
148 | | // it make sense to zero the slabs instead?
149 | 448 | #ifndef NO_GC_HACK |
150 | | // When not collecting, we need a huge 400 MiB heap. Try to save startup |
151 | | // time by not doing this. |
152 | 448 | memset(begin_, 0, size_); |
153 | 448 | #endif |
154 | 448 | } |
155 | | |
156 | | #if GC_PROTECT |
157 | | // To maintain invariants |
158 | | void Protect(); |
159 | | void Unprotect(); |
160 | | #endif |
161 | | #if GC_DEBUG |
162 | 0 | void AssertValid(void* p) { |
163 | 0 | if (begin_ <= p && p < begin_ + size_) { |
164 | 0 | return; |
165 | 0 | } |
166 | 0 | log("p = %p isn't between %p and %p", p, begin_, begin_ + size_);
167 | 0 | InvalidCodePath(); |
168 | 0 | } |
169 | | #endif |
170 | | |
171 | | char* begin_; |
172 | | int size_; // number of bytes |
173 | | }; |
174 | | |
175 | | class Heap { |
176 | | public: |
177 | 27 | Heap() { // default constructor does nothing -- relies on zero initialization |
178 | 27 | } |
179 | | |
180 | | // Real initialization with the initial heap size. The heap grows with |
181 | | // allocations. |
182 | 35 | void Init(int space_size) { |
183 | | // malloc() and memset() |
184 | 35 | from_space_.Init(space_size); |
185 | 35 | to_space_.Init(space_size); |
186 | | |
187 | 35 | free_ = from_space_.begin_; // where we allocate from |
188 | 35 | limit_ = free_ + space_size; |
189 | | |
190 | 35 | roots_top_ = 0; |
191 | | |
192 | 35 | #if GC_DEBUG |
193 | 35 | num_collections_ = 0; |
194 | 35 | num_heap_growths_ = 0; |
195 | 35 | num_forced_growths_ = 0; |
196 | 35 | num_live_objs_ = 0; |
197 | 35 | #endif |
198 | 35 | } |
199 | | |
200 | 3.02k | void* Bump(int n) { |
201 | 3.02k | char* p = free_; |
202 | 3.02k | free_ += n; |
203 | 3.02k | #if GC_DEBUG |
204 | 3.02k | num_live_objs_++; |
205 | 3.02k | #endif |
206 | 3.02k | return p; |
207 | 3.02k | } |
208 | | |
209 | 3.02k | void* Allocate(int num_bytes) { |
210 | 3.02k | int n = aligned(num_bytes); |
211 | | // log("n = %d, p = %p", n, p); |
212 | | |
213 | | #if GC_EVERY_ALLOC |
214 | | Collect(); // force collection to find problems early |
215 | | #endif |
216 | | |
217 | 3.02k | if (free_ + n <= limit_) { // Common case: we have space for it. |
218 | 2.68k | return Bump(n); |
219 | 2.68k | } |
220 | | |
221 | 344 | #if GC_DEBUG |
222 | | // log("GC free_ %p, from_space_ %p, space_size_ %d", free_, from_space_, |
223 | | // space_size_); |
224 | 344 | #endif |
225 | | |
226 | 344 | Collect(); // Try to free some space. |
227 | | |
228 | | // log("after GC: from begin %p, free_ %p, n %d, limit_ %p", |
229 | | // from_space_.begin_, free_, n, limit_); |
230 | | |
231 | 344 | if (free_ + n <= limit_) { // Now we have space for it. |
232 | 333 | return Bump(n); |
233 | 333 | } |
234 | | |
235 | | // It's STILL too small. Resize to_space_ to ENSURE that allocation will |
236 | | // succeed, copy the heap to it, then allocate the object. |
237 | 11 | int multiple = 2; |
238 | 11 | while (from_space_.size_ + n > to_space_.size_ * multiple) { |
239 | 0 | multiple *= 2; |
240 | 0 | } |
241 | | // log("=== FORCED by multiple of %d", multiple); |
242 | 11 | to_space_.Free(); |
243 | 11 | to_space_.Init(to_space_.size_ * multiple); |
244 | | |
245 | 11 | Collect(); |
246 | 11 | #if GC_DEBUG |
247 | 11 | num_forced_growths_++; |
248 | 11 | #endif |
249 | | |
250 | 11 | return Bump(n); |
251 | 344 | } |
252 | | |
253 | 357 | void Swap() { |
254 | | // Swap spaces for next collection. |
255 | 357 | char* tmp = from_space_.begin_; |
256 | 357 | from_space_.begin_ = to_space_.begin_; |
257 | 357 | to_space_.begin_ = tmp; |
258 | | |
259 | 357 | int tmp2 = from_space_.size_; |
260 | 357 | from_space_.size_ = to_space_.size_; |
261 | 357 | to_space_.size_ = tmp2; |
262 | 357 | } |
263 | | |
264 | 11.4k | void PushRoot(Obj** p) { |
265 | | // log("PushRoot %d", roots_top_); |
266 | 11.4k | roots_[roots_top_++] = p; |
267 | | // TODO: This should be like a malloc() failure? |
268 | 11.4k | assert(roots_top_ < kMaxRoots); |
269 | 11.4k | } |
270 | | |
271 | 11.4k | void PopRoot() { |
272 | 11.4k | roots_top_--; |
273 | | // log("PopRoot %d", roots_top_); |
274 | 11.4k | } |
275 | | |
276 | | Obj* Relocate(Obj* obj, Obj* header); |
277 | | |
278 | | // mutates free_ and other variables |
279 | | void Collect(); |
280 | | |
281 | | #if GC_DEBUG |
282 | 3 | void Report() { |
283 | 3 | log("-----"); |
284 | 3 | log("num collections = %d", num_collections_); |
285 | 3 | log("num heap growths = %d", num_heap_growths_); |
286 | 3 | log("num forced heap growths = %d", num_forced_growths_); |
287 | 3 | log("num live objects = %d", num_live_objs_); |
288 | | |
289 | 3 | log("from_space_ %p", from_space_.begin_); |
290 | 3 | log("to_space %p", to_space_.begin_); |
291 | 3 | log("-----"); |
292 | 3 | } |
293 | | #endif |
294 | | |
295 | | Space from_space_; // space we allocate from |
296 | | Space to_space_; // space that the collector copies to |
297 | | |
298 | | char* free_; // next place to allocate, from_space_ <= free_ < limit_ |
299 | | char* limit_; // end of space we're allocating from |
300 | | |
301 | | // Stack roots. The obvious data structure is a linked list, but an array |
302 | | // has better locality. |
303 | | // |
304 | | // femtolisp uses a global pointer to a dynamically-allocated growable array,
305 | | // with initial N_STACK = 262144! Kind of arbitrary. |
306 | | |
307 | | int roots_top_; |
308 | | Obj** roots_[kMaxRoots]; // These are pointers to Obj* pointers |
309 | | |
310 | | #if GC_DEBUG |
311 | | int num_collections_; |
312 | | int num_heap_growths_; |
313 | | int num_forced_growths_; // when a single allocation is too big |
314 | | int num_live_objs_; |
315 | | #endif |
316 | | }; |
317 | | |
318 | | // The heap is a (compound) global variable. Notes: |
319 | | // - The default constructor does nothing, to avoid initialization order |
320 | | // problems. |
321 | | // - For some applications, this can be thread_local rather than global. |
322 | | extern Heap gHeap; |
323 | | |
324 | | class StackRoots { |
325 | | public: |
326 | 8.16k | StackRoots(std::initializer_list<void*> roots) { |
327 | 8.16k | n_ = roots.size(); |
328 | 11.2k | for (auto root : roots) { // can't use roots[i] |
329 | 11.2k | gHeap.PushRoot(reinterpret_cast<Obj**>(root)); |
330 | 11.2k | } |
331 | 8.16k | } |
332 | | |
333 | 8.16k | ~StackRoots() { |
334 | | // TODO: optimize this |
335 | 19.3k | for (int i = 0; i < n_; ++i) { |
336 | 11.2k | gHeap.PopRoot(); |
337 | 11.2k | } |
338 | 8.16k | } |
339 | | |
340 | | private: |
341 | | int n_; |
342 | | }; |
343 | | |
344 | | template <typename T> |
345 | | class Local { |
346 | | // We can garbage collect at any Alloc() invocation, so we need a level of |
347 | | // indirection for locals / pointers directly on the stack. Pointers on the |
348 | | // heap are updated by the Cheney GC algorithm. |
349 | | |
350 | | public: |
351 | | Local() : raw_pointer_(nullptr) { |
352 | | } |
353 | | |
354 | | // IMPLICIT conversion. No 'explicit'. |
355 | 0 | Local(T* raw_pointer) : raw_pointer_(raw_pointer) { |
356 | | // TODO(Jesse): Does this get called? |
357 | | // Is this NotImplemented() or InvalidCodePath() ?? |
358 | 0 | assert(0); |
359 | | // gHeap.PushRoot(this); |
360 | 0 | } |
361 | | |
362 | | // Copy constructor, e.g. f(mylocal) where f(Local<T> param); |
363 | | Local(const Local& other) : raw_pointer_(other.raw_pointer_) { |
364 | | // TODO(Jesse): Does this get called? |
365 | | // Is this NotImplemented() or InvalidCodePath() ?? |
366 | | assert(0); |
367 | | // gHeap.PushRoot(this); |
368 | | } |
369 | | |
370 | | void operator=(const Local& other) { // Assignment operator |
371 | | raw_pointer_ = other.raw_pointer_; |
372 | | |
373 | | // Note: we could try to avoid PushRoot() as an optimization. Example: |
374 | | // |
375 | | // Local<Str> a = CopyStr("foo"); |
376 | | // Local<Str> b; |
377 | | // b = a; // invokes operator=, it's already a root |
378 | | // |
379 | | // Local<Str> c = myfunc(); // T* constructor takes care of PushRoot() |
380 | | |
381 | | // log("operator="); |
382 | | |
383 | | // However the problem is that then we'll have an unbalanced PopRoot(). |
384 | | // So we keep it for now. |
385 | | gHeap.PushRoot(this); |
386 | | } |
387 | | |
388 | 0 | ~Local() { |
389 | 0 | gHeap.PopRoot(); |
390 | 0 | } |
391 | | |
392 | | // This cast operator overload allows: |
393 | | // |
394 | | // Local<Str> s = CopyStr("foo"); |
395 | | // node->mystr = s; // convert from Local to raw |
396 | | // |
397 | | // As well as: |
398 | | // |
399 | | // Local<List<Str*>> strings = Alloc<List<Str*>>(); |
400 | | // strings->append(CopyStr("foo")); // convert from local to raw |
401 | | // |
402 | | // The heap should NOT have locals! List<Str*> and not List<Local<Str>>.
403 | | // |
404 | | // Note: This could be considered dangerous if we don't maintain |
405 | | // discipline. |
406 | | // |
407 | | // https://www.informit.com/articles/article.aspx?p=31529&seqNum=7 |
408 | | // |
409 | | // Putting .get() at the call site in mycpp is more explicit. The |
410 | | // readability of the generated code is important! |
411 | | #if 1 |
412 | 0 | operator T*() { |
413 | 0 | return raw_pointer_; |
414 | 0 | }
415 | | #endif |
416 | | |
417 | | // Allows ref->field and ref->method() |
418 | 0 | T* operator->() const { |
419 | | // log("operator->"); |
420 | 0 | return raw_pointer_; |
421 | 0 | }
422 | | T* Get() const { |
423 | | return raw_pointer_; |
424 | | } |
425 | | // called by the garbage collector when moved to a new location! |
426 | | void Update(T* moved) { |
427 | | raw_pointer_ = moved; |
428 | | } |
429 | | |
430 | | // Dereference to get the real value. Doesn't seem like we need this. |
431 | | #if 0 |
432 | | T operator*() const { |
433 | | //log("operator*"); |
434 | | return *raw_pointer_; |
435 | | } |
436 | | #endif |
437 | | |
438 | | T* raw_pointer_; |
439 | | }; |
440 | | |
441 | | template <typename T> |
442 | | class Param : public Local<T> { |
443 | | // This could be an optimization like SpiderMonkey's Handle<T> vs Rooted<T>. |
444 | | // We use the names Param<T> and Local<T>. |
445 | | |
446 | | public: |
447 | | // hm this is awkward, I think we should NOT inherit! We should only |
448 | | // convert. |
449 | | Param(const Local<T>& other) : Local<T>(nullptr) { |
450 | | this->raw_pointer_ = other.raw_pointer_; |
451 | | } |
452 | | |
453 | | ~Param() { // do not PopRoot() |
454 | | } |
455 | | |
456 | | // Construct from T* -- PushRoot() |
457 | | // Construct from Local<T> -- we don't need to PushRoot() |
458 | | // operator= -- I don't think we need to PushRoot() |
459 | | }; |
460 | | |
461 | | // Variadic templates: |
462 | | // https://eli.thegreenplace.net/2014/variadic-templates-in-c/ |
463 | | template <typename T, typename... Args> |
464 | 705 | T* Alloc(Args&&... args) { |
465 | 705 | void* place = gHeap.Allocate(sizeof(T)); |
466 | 705 | assert(place != nullptr); |
467 | | // placement new |
468 | 0 | return new (place) T(std::forward<Args>(args)...); |
469 | 705 | }
470 | | |
471 | | // Return the size of a resizeable allocation. For now we just round up by |
472 | | // powers of 2. This could be optimized later. CPython has an interesting |
473 | | // policy in listobject.c. |
474 | | // |
475 | | // https://stackoverflow.com/questions/466204/rounding-up-to-next-power-of-2 |
476 | 912 | inline int RoundUp(int n) { |
477 | | // minimum size |
478 | 912 | if (n < 8) { |
479 | 421 | return 8; |
480 | 421 | } |
481 | | |
482 | | // TODO: what if int isn't 32 bits? |
483 | 491 | n--; |
484 | 491 | n |= n >> 1; |
485 | 491 | n |= n >> 2; |
486 | 491 | n |= n >> 4; |
487 | 491 | n |= n >> 8; |
488 | 491 | n |= n >> 16; |
489 | 491 | n++; |
490 | 491 | return n; |
491 | 912 | } |
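// For example: RoundUp(5) == 8 (the minimum), RoundUp(9) == 16, and
// RoundUp(16) == 16 (powers of 2 are unchanged).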
492 | | |
493 | | const int kZeroMask = 0; // for types with no pointers |
494 | | // no obj_len_ computed for global List/Slab/Dict |
495 | | const int kNoObjLen = 0x0eadbeef; |
496 | | |
497 | | // Why do we need this macro instead of using inheritance? |
498 | | // - Because ASDL uses multiple inheritance for first class variants, but we |
499 | | // don't want multiple IMPLEMENTATION inheritance. Instead we just generate |
500 | | // compatible layouts. |
501 | | // - Similarly, GlobalStr is layout-compatible with Str. It can't inherit from |
502 | | // Obj like Str, because of the constexpr issue with char[N]. |
503 | | |
504 | | // heap_tag_: one of Tag:: |
505 | | // type_tag_: ASDL tag (variant) |
506 | | // field_mask_: for fixed length records, so max 16 fields |
507 | | // obj_len_: number of bytes to copy |
508 | | // TODO: with a limitation of ~15 fields, we can encode obj_len_ in |
509 | | // field_mask_, and save space on many ASDL types. |
510 | | // And we can sort integers BEFORE pointers. |
511 | | |
512 | | // TODO: ./configure could detect big or little endian, and then flip the |
513 | | // fields in OBJ_HEADER? |
514 | | // |
515 | | // https://stackoverflow.com/questions/2100331/c-macro-definition-to-determine-big-endian-or-little-endian-machine |
516 | | // |
517 | | // Because we want to do ((obj->heap_tag_ & 1) == 0) to distinguish it from a
518 | | // vtable pointer. We assume the low bits of a pointer are 0, but not the high bits.
519 | | |
520 | | #define OBJ_HEADER() \ |
521 | | uint8_t heap_tag_; \ |
522 | | uint8_t type_tag_; \ |
523 | | uint16_t field_mask_; \ |
524 | | uint32_t obj_len_; |
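// The four fields above pack into the 8-byte object header described in the
// design notes: 1 + 1 + 2 + 4 bytes.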
525 | | |
526 | | class Obj { |
527 | | // The unit of garbage collection. It has a header describing how to find |
528 | | // the pointers within it. |
529 | | // |
530 | | // Note: Sorting ASDL fields by (non-pointer, pointer) is a good idea, but it |
531 | | // breaks down because mycpp has inheritance. Could do this later. |
532 | | |
533 | | public: |
534 | | // Note: ASDL types are layout-compatible with Obj, but don't actually |
535 | | // inherit from it because of the 'multiple inheritance of implementation' |
536 | | // issue. So they don't call this constructor. |
537 | | constexpr Obj(uint8_t heap_tag, uint16_t field_mask, int obj_len) |
538 | | : heap_tag_(heap_tag), |
539 | | type_tag_(0), |
540 | | field_mask_(field_mask), |
541 | 3.59k | obj_len_(obj_len) { |
542 | 3.59k | } |
543 | | |
544 | 1.84k | void SetObjLen(int obj_len) { |
545 | 1.84k | this->obj_len_ = obj_len; |
546 | 1.84k | } |
547 | | |
548 | | OBJ_HEADER() |
549 | | |
550 | | DISALLOW_COPY_AND_ASSIGN(Obj) |
551 | | }; |
552 | | |
553 | | template <typename T> |
554 | 112 | inline void InitSlabCell(Obj* obj) { |
555 | | // log("SCANNED"); |
556 | 112 | obj->heap_tag_ = Tag::Scanned; |
557 | 112 | }
558 | | |
559 | | template <> |
560 | 351 | inline void InitSlabCell<int>(Obj* obj) { |
561 | | // log("OPAQUE"); |
562 | 351 | obj->heap_tag_ = Tag::Opaque; |
563 | 351 | } |
564 | | |
565 | | // don't include items_[1] |
566 | | const int kSlabHeaderSize = sizeof(Obj); |
567 | | |
568 | | // Opaque slab, e.g. for List<int> |
569 | | template <typename T> |
570 | | class Slab : public Obj { |
571 | | public: |
572 | 463 | Slab(int obj_len) : Obj(0, 0, obj_len) { |
573 | 463 | InitSlabCell<T>(this); |
574 | 463 | }
575 | | T items_[1]; // variable length |
576 | | }; |
577 | | |
578 | | template <typename T, int N> |
579 | | class GlobalSlab { |
580 | | // A template type with the same layout as Slab<T> holding N items. For
581 | | // initializing global constant instances.
582 | | public: |
583 | | OBJ_HEADER() |
584 | | |
585 | | T items_[N]; |
586 | | |
587 | | DISALLOW_COPY_AND_ASSIGN(GlobalSlab) |
588 | | }; |
589 | | |
590 | | // Note: entries will be zero'd because the Heap is zero'd. |
591 | | template <typename T> |
592 | 463 | inline Slab<T>* NewSlab(int len) { |
593 | 463 | int obj_len = RoundUp(kSlabHeaderSize + len * sizeof(T)); |
594 | 463 | void* place = gHeap.Allocate(obj_len); |
595 | 463 | auto slab = new (place) Slab<T>(obj_len); // placement new |
596 | 463 | return slab; |
597 | 463 | }
598 | | |
599 | | #ifdef MYLIB_LEAKY |
600 | | #define GLOBAL_STR(name, val) Str* name = new Str(val); |
601 | | #define GLOBAL_LIST(T, N, name, array) List<T>* name = new List<T>(array); |
602 | | #endif |
603 | | |
604 | | #ifndef MYLIB_LEAKY |
605 | | |
606 | | // |
607 | | // Str |
608 | | // |
609 | | |
610 | | class Str : public gc_heap::Obj { |
611 | | public: |
612 | | // Don't call this directly. Call AllocStr() instead, which calls this. |
613 | 1.85k | explicit Str() : Obj(Tag::Opaque, kZeroMask, 0) { |
614 | | // log("GC Str()"); |
615 | 1.85k | } |
616 | | |
617 | 22 | char* data() { |
618 | 22 | return data_; |
619 | 22 | }; |
620 | | |
621 | | void SetObjLenFromStrLen(int str_len); |
622 | | |
623 | | Str* index_(int i); |
624 | | Str* slice(int begin); |
625 | | Str* slice(int begin, int end); |
626 | | |
627 | | Str* strip(); |
628 | | // Used for CommandSub in osh/cmd_exec.py |
629 | | Str* rstrip(Str* chars); |
630 | | Str* rstrip(); |
631 | | |
632 | | Str* ljust(int width, Str* fillchar); |
633 | | Str* rjust(int width, Str* fillchar); |
634 | | |
635 | | bool startswith(Str* s); |
636 | | bool endswith(Str* s); |
637 | | |
638 | | Str* replace(Str* old, Str* new_str); |
639 | | Str* join(List<Str*>* items); |
640 | | List<Str*>* split(Str* sep); |
641 | | |
642 | | bool isdigit(); |
643 | | bool isalpha(); |
644 | | bool isupper(); |
645 | | |
646 | 0 | Str* upper() { |
647 | 0 | NotImplemented(); // Uncalled |
648 | 0 | } |
649 | | |
650 | 0 | Str* lower() { |
651 | 0 | NotImplemented(); // Uncalled |
652 | 0 | } |
653 | | |
654 | | // Other options for fast comparison / hashing / string interning: |
655 | | // - unique_id_: an index into intern table. I don't think this works unless |
656 | | // you want to deal with rehashing all strings when the set grows. |
657 | | // - although note that the JVM's -XX:StringTableSize is FIXED, which means
658 | | // it can degrade into linked list performance
659 | | // - Hashed strings become GLOBAL_STR(). Never deallocated. |
660 | | // - Hashed strings become part of the "large object space", which might be |
661 | | // managed by mark and sweep. This requires linked list overhead. |
662 | | // (doubly-linked?) |
663 | | // - Intern strings at GARBAGE COLLECTION TIME, with |
664 | | // LayoutForwarded::new_location_? Is this possible? Does it introduce |
665 | | // too much coupling between strings, hash tables, and GC? |
666 | | int hash_value_; |
667 | | char data_[1]; // flexible array |
668 | | |
669 | | private: |
670 | | int _strip_left_pos(); |
671 | | int _strip_right_pos(); |
672 | | |
673 | | DISALLOW_COPY_AND_ASSIGN(Str) |
674 | | }; |
675 | | |
676 | | constexpr int kStrHeaderSize = offsetof(Str, data_); |
677 | | |
678 | 11 | inline void Str::SetObjLenFromStrLen(int str_len) { |
679 | 11 | obj_len_ = kStrHeaderSize + str_len + 1; // NUL terminator |
680 | 11 | } |
681 | | |
682 | | template <int N> |
683 | | class GlobalStr { |
684 | | // A template type with the same layout as Str with length N-1 (which needs a |
685 | | // buffer of size N). For initializing global constant instances. |
686 | | public: |
687 | | OBJ_HEADER() |
688 | | |
689 | | int hash_value_; |
690 | | const char data_[N]; |
691 | | |
692 | | DISALLOW_COPY_AND_ASSIGN(GlobalStr) |
693 | | }; |
694 | | |
695 | | extern Str* kEmptyString; |
696 | | |
697 | | // This macro is a workaround for the fact that it's impossible to have
698 | | // a constexpr initializer for char[N]. The "String Literals as Non-Type |
699 | | // Template Parameters" feature of C++ 20 would have done it, but it's not |
700 | | // there. |
701 | | // |
702 | | // https://old.reddit.com/r/cpp_questions/comments/j0khh6/how_to_constexpr_initialize_class_member_thats/ |
703 | | // https://stackoverflow.com/questions/10422487/how-can-i-initialize-char-arrays-in-a-constructor |
704 | | |
705 | | #define GLOBAL_STR(name, val) \ |
706 | | gc_heap::GlobalStr<sizeof(val)> _##name = { \ |
707 | | Tag::Global, \ |
708 | | 0, \ |
709 | | gc_heap::kZeroMask, \ |
710 | | gc_heap::kStrHeaderSize + sizeof(val), \ |
711 | | -1, \ |
712 | | val}; \ |
713 | | Str* name = reinterpret_cast<Str*>(&_##name); |
714 | | |
715 | | // Notes: |
716 | | // - sizeof("foo") == 4, for the NUL terminator. |
717 | | // - gc_heap_test.cc has a static_assert that GlobalStr matches Str. We don't |
718 | | // put it here because it triggers -Winvalid-offsetof |
719 | | |
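// Usage sketch (kHello is a hypothetical global):
//
//   GLOBAL_STR(kHello, "hello");  // defines Str* kHello, usable like any Str*
//
// Here sizeof("hello") == 6, so obj_len_ is kStrHeaderSize + 6.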
720 | | // |
721 | | // String "Constructors". We need these because of the "flexible array" |
722 | | // pattern. I don't think "new Str()" can do that, and placement new would |
723 | | // require mycpp to generate 2 statements everywhere. |
724 | | // |
725 | | |
726 | | // New string of a certain length, to be filled in |
727 | 1.84k | inline Str* AllocStr(int len) { |
728 | 1.84k | int obj_len = kStrHeaderSize + len + 1; // NUL terminator |
729 | 1.84k | void* place = gHeap.Allocate(obj_len); |
730 | 1.84k | auto s = new (place) Str(); |
731 | 1.84k | s->SetObjLen(obj_len); // So the GC can copy it |
732 | 1.84k | return s; |
733 | 1.84k | } |
734 | | |
735 | | // Like AllocStr, but allocate more than you need, e.g. for snprintf() to write |
736 | | // into. CALLER IS RESPONSIBLE for calling s->SetObjLenFromStrLen() afterward! |
737 | 11 | inline Str* OverAllocatedStr(int len) { |
738 | 11 | int obj_len = kStrHeaderSize + len + 1; // NUL terminator |
739 | 11 | void* place = gHeap.Allocate(obj_len); |
740 | 11 | auto s = new (place) Str(); |
741 | 11 | return s; |
742 | 11 | } |
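// Usage sketch (kMaxDigits and the integer 'i' are hypothetical):
//
//   Str* s = OverAllocatedStr(kMaxDigits);
//   int n = snprintf(s->data(), kMaxDigits + 1, "%d", i);
//   s->SetObjLenFromStrLen(n);  // record the actual length for the GC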
743 | | |
744 | 747 | inline Str* CopyStr(const char* data, int len) { |
745 | | // Problem: if 'data' points inside a GC-managed Str, the collection that AllocStr() may trigger can invalidate it!
746 | 747 | Str* s = AllocStr(len); |
747 | | |
748 | | // log("AllocStr s->data_ %p len = %d", s->data_, len); |
749 | | // log("sizeof(Str) = %d", sizeof(Str)); |
750 | 747 | memcpy(s->data_, data, len); |
751 | 747 | assert(s->data_[len] == '\0'); // should be true because Heap was zeroed |
752 | | |
753 | 0 | return s; |
754 | 747 | } |
755 | | |
756 | | // CHOPPED OFF at internal NUL. Use explicit length if you have a NUL. |
757 | 142 | inline Str* CopyStr(const char* data) { |
758 | 142 | return CopyStr(data, strlen(data)); |
759 | 142 | } |
760 | | |
761 | | bool str_equals(Str* left, Str* right); |
762 | | bool maybe_str_equals(Str* left, Str* right); |
763 | | |
764 | | // |
765 | | // Compile-time computation of GC field masks. |
766 | | // |
767 | | |
768 | | class _DummyObj { // For maskbit() |
769 | | public: |
770 | | OBJ_HEADER() |
771 | | int first_field_; |
772 | | }; |
773 | | |
774 | 473 | constexpr int maskbit(int offset) { |
775 | 473 | return 1 << ((offset - offsetof(_DummyObj, first_field_)) / sizeof(void*)); |
776 | 473 | } |
777 | | |
778 | | class _DummyObj_v { // For maskbit_v() |
779 | | public: |
780 | | void* vtable; // how the compiler does dynamic dispatch |
781 | | OBJ_HEADER() |
782 | | int first_field_; |
783 | | }; |
784 | | |
785 | 1 | constexpr int maskbit_v(int offset) { |
786 | 1 | return 1 << ((offset - offsetof(_DummyObj_v, first_field_)) / sizeof(void*)); |
787 | 1 | } |
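// For example, the first field after the 8-byte header maps to bit 0, the next
// pointer-sized slot to bit 1, and so on. A hypothetical class whose first and
// third slots hold pointers would use a field_mask_ of 0b101.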
788 | | |
789 | | // |
790 | | // List<T> |
791 | | // |
792 | | |
793 | | // Type that is layout-compatible with List (unit tests assert this). Two |
794 | | // purposes: |
795 | | // - To make globals of "plain old data" at compile-time, not at startup time. |
796 | | // This can't be done with subclasses of Obj. |
797 | | // - To avoid invalid-offsetof warnings when computing GC masks. |
798 | | |
799 | | template <typename T, int N> |
800 | | class GlobalList { |
801 | | public: |
802 | | OBJ_HEADER() |
803 | | int len_; |
804 | | int capacity_; |
805 | | GlobalSlab<T, N>* slab_; |
806 | | }; |
807 | | |
808 | | #define GLOBAL_LIST(T, N, name, array) \ |
809 | | gc_heap::GlobalSlab<T, N> _slab_##name = { \ |
810 | | Tag::Global, 0, gc_heap::kZeroMask, gc_heap::kNoObjLen, array}; \ |
811 | | gc_heap::GlobalList<T, N> _list_##name = { \ |
812 | | Tag::Global, 0, gc_heap::kZeroMask, gc_heap::kNoObjLen, \ |
813 | | N, N, &_slab_##name}; \ |
814 | | List<T>* name = reinterpret_cast<List<T>*>(&_list_##name); |
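// Usage sketch (gInts is a hypothetical global; COMMA is needed because the
// braced initializer would otherwise be split into separate macro arguments):
//
//   GLOBAL_LIST(int, 3, gInts, {1 COMMA 2 COMMA 3});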
815 | | |
816 | | // A list has one Slab pointer which we need to follow. |
817 | 446 | constexpr uint16_t maskof_List() { |
818 | 446 | return maskbit(offsetof(GlobalList<int COMMA 1>, slab_)); |
819 | 446 | } |
820 | | |
821 | | template <typename T> |
822 | | class List : public gc_heap::Obj { |
823 | | // TODO: Move methods that don't allocate or resize: out of gc_heap? |
824 | | // - allocate: append(), extend() |
825 | | // - resize: pop(), clear() |
826 | | // - neither: reverse(), sort() -- these are more like functions. Except |
827 | | // sort() is a templated method that depends on type param T. |
828 | | // - neither: index(), slice() |
829 | | |
830 | | public: |
831 | 446 | List() : Obj(Tag::FixedSize, maskof_List(), sizeof(List<T>)) { |
832 | | // Ensured by heap zeroing. It's never directly on the stack. |
833 | 446 | assert(len_ == 0); |
834 | 0 | assert(capacity_ == 0); |
835 | 0 | assert(slab_ == nullptr); |
836 | 446 | }
837 | | |
838 | | // Implements L[i] |
839 | 402 | T index_(int i) { |
840 | 402 | if (i < 0) { |
841 | 4 | i += len_; |
842 | 4 | } |
843 | 402 | if (i < len_) { |
844 | 402 | return slab_->items_[i]; |
845 | 402 | } |
846 | | |
847 | 0 | log("i = %d, len_ = %d", i, len_); |
848 | 0 | InvalidCodePath(); // Out of bounds |
849 | 0 | }
850 | | |
851 | | // Implements L[i] = item |
852 | | // Note: Unlike Dict::set(), we don't need to specialize List::set() on T for |
853 | | // StackRoots because it doesn't allocate. |
854 | 2.81k | void set(int i, T item) { |
855 | 2.81k | slab_->items_[i] = item; |
856 | 2.81k | }
857 | | |
858 | | // L[begin:] |
859 | 304 | List* slice(int begin) { |
860 | 304 | auto self = this; |
861 | 304 | List<T>* result = nullptr; |
862 | 304 | StackRoots _roots({&self, &result}); |
863 | | |
864 | 304 | if (begin == 0) { |
865 | 0 | return self; |
866 | 0 | } |
867 | 304 | if (begin < 0) { |
868 | 0 | begin = self->len_ + begin; |
869 | 0 | } |
870 | | |
871 | 304 | result = Alloc<List<T>>(); // TODO: initialize with size |
872 | 1.51k | for (int i = begin; i < self->len_; i++) { |
873 | 1.20k | result->append(self->slab_->items_[i]); |
874 | 1.20k | } |
875 | 304 | return result; |
876 | 304 | }
877 | | |
878 | | // L[begin:end] |
879 | | // TODO: Can this be optimized? |
880 | 1 | List* slice(int begin, int end) { |
881 | 1 | auto self = this; |
882 | 1 | List<T>* result = nullptr; |
883 | 1 | StackRoots _roots({&self, &result}); |
884 | | |
885 | 1 | if (begin < 0) { |
886 | 0 | begin = self->len_ + begin; |
887 | 0 | } |
888 | 1 | if (end < 0) { |
889 | 1 | end = self->len_ + end; |
890 | 1 | } |
891 | | |
892 | 1 | result = Alloc<List<T>>(); // TODO: initialize with size |
893 | 3 | for (int i = begin; i < end; i++) { |
894 | 2 | result->append(self->slab_->items_[i]); |
895 | 2 | } |
896 | 1 | return result; |
897 | 1 | } |
898 | | |
899 | | // Should we have a separate API that doesn't return it? |
900 | | // https://stackoverflow.com/questions/12600330/pop-back-return-value |
901 | 3 | T pop() { |
902 | 3 | assert(len_ > 0); |
903 | 0 | len_--; |
904 | 3 | T result = slab_->items_[len_]; |
905 | 3 | slab_->items_[len_] = 0; // zero for GC scan |
906 | 3 | return result; |
907 | 3 | }
Instantiations: _ZN7gc_heap4ListIPNS_3StrEE3popEv (3)
Unexecuted instantiation: _ZN7gc_heap4ListIiE3popEv
908 | | |
909 | | // Used in osh/word_parse.py to remove from front |
910 | | // TODO: Don't accept an arbitrary index? |
911 | 0 | T pop(int i) { |
912 | 0 | assert(len_ > 0); |
913 | 0 | assert(i == 0); // only support popping the first item |
914 | | |
915 | 0 | len_--; |
916 | 0 | T result = index_(0); |
917 | | |
918 | | // Shift everything by one |
919 | 0 | memmove(slab_->items_, slab_->items_ + 1, len_ * sizeof(T)); |
920 | | /* |
921 | | for (int j = 0; j < len_; j++) { |
922 | | slab_->items_[j] = slab_->items_[j+1]; |
923 | | } |
924 | | */ |
925 | |
926 | 0 | slab_->items_[len_] = 0; // zero for GC scan |
927 | 0 | return result; |
928 | 0 | } |
929 | | |
930 | 2 | void clear() { |
931 | 2 | memset(slab_->items_, 0, len_ * sizeof(T)); // zero for GC scan |
932 | 2 | len_ = 0; |
933 | 2 | }
Instantiations: _ZN7gc_heap4ListIiE5clearEv (1), _ZN7gc_heap4ListIPNS_3StrEE5clearEv (1)
934 | | |
935 | | // Used in osh/string_ops.py |
936 | 0 | void reverse() { |
937 | 0 | for (int i = 0; i < len_ / 2; ++i) { |
938 | | // log("swapping %d and %d", i, n-i); |
939 | 0 | T tmp = slab_->items_[i]; |
940 | 0 | int j = len_ - 1 - i; |
941 | 0 | slab_->items_[i] = slab_->items_[j]; |
942 | 0 | slab_->items_[j] = tmp; |
943 | 0 | } |
944 | 0 | }
Unexecuted instantiation: _ZN7gc_heap4ListIiE7reverseEv
Unexecuted instantiation: _ZN7gc_heap4ListIPNS_3StrEE7reverseEv
945 | | |
946 | | // Templated function |
947 | | void sort(); |
948 | | |
949 | | // 8 / 4 = 2 items, or 8 / 8 = 1 item |
950 | | static const int kCapacityAdjust = kSlabHeaderSize / sizeof(T); |
951 | | static_assert(kSlabHeaderSize % sizeof(T) == 0, |
952 | | "Slab header size should be multiple of item size"); |
953 | | |
954 | | // Ensure that there's space for a number of items |
955 | 2.76k | void reserve(int n) { |
956 | | // log("reserve capacity = %d, n = %d", capacity_, n); |
957 | 2.76k | auto self = this; |
958 | 2.76k | StackRoots _roots({&self}); |
959 | | |
960 | | // Don't do anything if there's already enough space. |
961 | 2.76k | if (self->capacity_ >= n) return; |
962 | | |
963 | | // Example: The user asks for space for 7 integers. Account for the |
964 | | // header, and say we need 9 to determine the obj length. 9 is |
965 | | // rounded up to 16, for a 64-byte obj. Then we actually have space |
966 | | // for 14 items. |
967 | 421 | self->capacity_ = RoundUp(n + kCapacityAdjust) - kCapacityAdjust; |
968 | 421 | auto new_slab = NewSlab<T>(self->capacity_); |
969 | | |
970 | 421 | if (self->len_ > 0) { |
971 | | // log("Copying %d bytes", len_ * sizeof(T)); |
972 | 13 | memcpy(new_slab->items_, self->slab_->items_, self->len_ * sizeof(T)); |
973 | 13 | } |
974 | 421 | self->slab_ = new_slab; |
975 | 421 | }
Instantiations: _ZN7gc_heap4ListIiE7reserveEi (2.52k), _ZN7gc_heap4ListIPNS_3StrEE7reserveEi (181), _ZN7gc_heap4ListIbE7reserveEi (1), _ZN7gc_heap4ListIP6Tuple2IPNS_3StrEiEE7reserveEi (1), _ZN7gc_heap4ListIP6Tuple2IiPNS_3StrEEE7reserveEi (2), _ZN7gc_heap4ListIPN10hnode_asdl5fieldEE7reserveEi (60)
Unexecuted instantiation: _ZN7gc_heap4ListIdE7reserveEi
976 | | |
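The capacity arithmetic above can be checked by hand. A worked example for List<int>, under the assumption that kSlabHeaderSize is 8 bytes and RoundUp() rounds up to a power of two (which is what the 7 -> 16 example in the comment suggests):

  // kCapacityAdjust = kSlabHeaderSize / sizeof(int) = 8 / 4 = 2
  // reserve(7):
  //   RoundUp(7 + 2) = RoundUp(9) = 16
  //   capacity_      = 16 - 2     = 14 items
  //   slab size      = 8-byte header + 14 * 4 bytes = 64 bytes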
977 | | // Append a single element to this list. Must be specialized List<int> vs |
978 | | // List<Str*>. |
979 | | void append(T item); |
980 | | |
981 | | // Extend this list with multiple elements. |
982 | 2 | void extend(List<T>* other) { |
983 | 2 | auto self = this; |
984 | 2 | StackRoots _roots({&self, &other}); |
985 | | |
986 | 2 | int n = other->len_; |
987 | 2 | int new_len = self->len_ + n; |
988 | 2 | self->reserve(new_len); |
989 | | |
990 | 9 | for (int i = 0; i < n; ++i) { |
991 | 7 | self->set(self->len_ + i, other->slab_->items_[i]); |
992 | 7 | } |
993 | 2 | self->len_ = new_len; |
994 | 2 | }
Instantiations: _ZN7gc_heap4ListIiE6extendEPS1_ (2)
Unexecuted instantiation: _ZN7gc_heap4ListIPNS_3StrEE6extendEPS3_
995 | | |
996 | | int len_; // number of entries |
997 | | int capacity_; // max entries before resizing |
998 | | |
999 | | // The container may be resized, so this field isn't in-line. |
1000 | | Slab<T>* slab_; |
1001 | | |
1002 | | DISALLOW_COPY_AND_ASSIGN(List) |
1003 | | }; |
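For orientation, a minimal usage sketch of List<T> (demo code, not part of the header; it assumes gc_heap.h is included and the heap was initialized elsewhere in the runtime):

  void ListDemo() {
    gc_heap::List<int>* nums = nullptr;
    gc_heap::List<int>* rest = nullptr;
    gc_heap::StackRoots _roots({&nums, &rest});  // root locals before allocating

    nums = gc_heap::NewList<int>();
    nums->append(1);        // may reserve() and swap in a bigger Slab
    nums->append(2);
    nums->set(0, 10);       // nums[0] = 10; never allocates
    rest = nums->slice(1);  // nums[1:]
    int n = len(nums);      // 2
    (void)n;
  }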
1004 | | |
1005 | | // "Constructors" as free functions since we can't allocate within a |
1006 | | // constructor. Allocation may cause garbage collection, which interferes with |
1007 | | // placement new. |
1008 | | |
1009 | | template <typename T> |
1010 | 5 | List<T>* NewList() { |
1011 | 5 | return Alloc<List<T>>(); |
1012 | 5 | }
Instantiations: _ZN7gc_heap7NewListIiEEPNS_4ListIT_EEv (3), _ZN7gc_heap7NewListIPNS_3StrEEEPNS_4ListIT_EEv (2)
Unexecuted instantiation: _ZN7gc_heap7NewListIPN10hnode_asdl5fieldEEEPNS_4ListIT_EEv
Unexecuted instantiation: _ZN7gc_heap7NewListIPN10hnode_asdl7hnode_tEEEPNS_4ListIT_EEv
1013 | | |
1014 | | // Literal ['foo', 'bar'] |
1015 | | template <typename T> |
1016 | 16 | List<T>* NewList(std::initializer_list<T> init) { |
1017 | 16 | auto self = Alloc<List<T>>(); |
1018 | 16 | StackRoots _roots({&self}); |
1019 | | |
1020 | 16 | int n = init.size(); |
1021 | 16 | self->reserve(n); |
1022 | | |
1023 | 16 | int i = 0; |
1024 | 54 | for (auto item : init) { |
1025 | 54 | self->set(i, item); |
1026 | 54 | ++i; |
1027 | 54 | } |
1028 | 16 | self->len_ = n; |
1029 | 16 | return self; |
1030 | 16 | }
Instantiations: _ZN7gc_heap7NewListIiEEPNS_4ListIT_EESt16initializer_listIS2_E (5), _ZN7gc_heap7NewListIPNS_3StrEEEPNS_4ListIT_EESt16initializer_listIS4_E (8), _ZN7gc_heap7NewListIP6Tuple2IPNS_3StrEiEEEPNS_4ListIT_EESt16initializer_listIS7_E (1), _ZN7gc_heap7NewListIP6Tuple2IiPNS_3StrEEEEPNS_4ListIT_EESt16initializer_listIS7_E (1), _ZN7gc_heap7NewListIbEEPNS_4ListIT_EESt16initializer_listIS2_E (1)
Unexecuted instantiation: _ZN7gc_heap7NewListIdEEPNS_4ListIT_EESt16initializer_listIS2_E
1031 | | |
1032 | | // ['foo'] * 3 |
1033 | | template <typename T> |
1034 | 3 | List<T>* NewList(T item, int times) { |
1035 | 3 | auto self = Alloc<List<T>>(); |
1036 | 3 | StackRoots _roots({&self}); |
1037 | | |
1038 | 3 | self->reserve(times); |
1039 | 3 | self->len_ = times; |
1040 | 12 | for (int i = 0; i < times; ++i) { |
1041 | 9 | self->set(i, item); |
1042 | 9 | } |
1043 | 3 | return self; |
1044 | 3 | }
Instantiations: _ZN7gc_heap7NewListIiEEPNS_4ListIT_EES2_i (2), _ZN7gc_heap7NewListIPNS_3StrEEEPNS_4ListIT_EES4_i (1)
Unexecuted instantiation: _ZN7gc_heap7NewListIbEEPNS_4ListIT_EES2_i
1045 | | |
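A small sketch of the literal and repetition constructors above (hypothetical demo code, assuming an initialized heap):

  void NewListDemo() {
    gc_heap::List<int>* lit = nullptr;
    gc_heap::List<int>* rep = nullptr;
    gc_heap::StackRoots _roots({&lit, &rep});

    lit = gc_heap::NewList<int>({7, 8, 9});  // literal: [7, 8, 9]
    rep = gc_heap::NewList<int>(0, 3);       // repetition: [0] * 3
  }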
1046 | | // e.g. List<int> |
1047 | | template <typename T> |
1048 | 2.51k | void list_append(List<T>* self, T item) { |
1049 | 2.51k | StackRoots _roots({&self}); |
1050 | | |
1051 | 2.51k | self->reserve(self->len_ + 1); |
1052 | 2.51k | self->set(self->len_, item); |
1053 | 2.51k | ++self->len_; |
1054 | 2.51k | } |
1055 | | |
1056 | | // e.g. List<Str*> |
1057 | | template <typename T> |
1058 | 233 | void list_append(List<T*>* self, T* item) { |
1059 | 233 | StackRoots _roots({&self, &item}); |
1060 | | |
1061 | 233 | self->reserve(self->len_ + 1); |
1062 | 233 | self->set(self->len_, item); |
1063 | 233 | ++self->len_; |
1064 | 233 | }
Instantiations: _ZN7gc_heap11list_appendINS_3StrEEEvPNS_4ListIPT_EES4_ (172), _ZN7gc_heap11list_appendIN10hnode_asdl5fieldEEEvPNS_4ListIPT_EES5_ (60), _ZN7gc_heap11list_appendI6Tuple2IiPNS_3StrEEEEvPNS_4ListIPT_EES7_ (1)
Unexecuted instantiation: _ZN7gc_heap11list_appendI6Tuple2IPNS_3StrEiEEEvPNS_4ListIPT_EES7_
1065 | | |
1066 | | template <typename T> |
1067 | 2.74k | void List<T>::append(T item) { |
1068 | 2.74k | list_append(this, item); |
1069 | 2.74k | }
Instantiations: _ZN7gc_heap4ListIiE6appendEi (2.51k), _ZN7gc_heap4ListIPNS_3StrEE6appendES2_ (172), _ZN7gc_heap4ListIPN10hnode_asdl5fieldEE6appendES3_ (60), _ZN7gc_heap4ListIP6Tuple2IiPNS_3StrEEE6appendES5_ (1)
Unexecuted instantiation: _ZN7gc_heap4ListIP6Tuple2IPNS_3StrEiEE6appendES5_
1070 | | |
1071 | | // |
1072 | | // Dict<K, V> |
1073 | | // |
1074 | | |
1075 | | // Non-negative entries in entry_ are array indices into keys_ and values_. |
1076 | | // There are two special negative entries. |
1077 | | |
1078 | | // index that means this Dict item was deleted (a tombstone). |
1079 | | const int kDeletedEntry = -1; |
1080 | | |
1081 | | // index that means this Dict entry is free. Because we have Dict[int, int], |
1082 | | // we can't use a sentinel entry in keys_. It has to be a sentinel entry in |
1083 | | // entry_. |
1084 | | const int kEmptyEntry = -2; |
1085 | | |
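To make the sentinel scheme concrete: with capacity_ == 4 and two pairs inserted (and nothing deleted), the three slabs look like this under the current linear layout (a sketch, not output from the code):

  // entry_  = [ 0,  0,  kEmptyEntry, kEmptyEntry ]
  // keys_   = [ k0, k1, 0,           0 ]
  // values_ = [ v0, v1, 0,           0 ]
  //
  // position_of_key() (defined below) scans entry_ from the left, skips
  // kDeletedEntry slots, and stops at the first kEmptyEntry.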
1086 | | // Helper for keys() and values() |
1087 | | template <typename T> |
1088 | 0 | List<T>* ListFromDictSlab(Slab<int>* index, Slab<T>* slab, int n) { |
1089 | | // TODO: Reserve the right amount of space |
1090 | 0 | List<T>* result = nullptr; |
1091 | 0 | StackRoots _roots({&index, &slab, &result}); |
1092 | |
1093 | 0 | result = Alloc<List<T>>(); |
1094 | |
1095 | 0 | for (int i = 0; i < n; ++i) { |
1096 | 0 | int special = index->items_[i]; |
1097 | 0 | if (special == kDeletedEntry) { |
1098 | 0 | continue; |
1099 | 0 | } |
1100 | 0 | if (special == kEmptyEntry) { |
1101 | 0 | break; |
1102 | 0 | } |
1103 | 0 | result->append(slab->items_[i]); |
1104 | 0 | } |
1105 | 0 | return result; |
1106 | 0 | }
Unexecuted instantiation: _ZN7gc_heap16ListFromDictSlabIPNS_3StrEEEPNS_4ListIT_EEPNS_4SlabIiEEPNS7_IS4_EEi
Unexecuted instantiation: _ZN7gc_heap16ListFromDictSlabIiEEPNS_4ListIT_EEPNS_4SlabIiEEPNS5_IS2_EEi
1107 | | |
1108 | 153 | inline bool keys_equal(int left, int right) { |
1109 | 153 | return left == right; |
1110 | 153 | } |
1111 | | |
1112 | 794 | inline bool keys_equal(Str* left, Str* right) { |
1113 | 794 | return str_equals(left, right); |
1114 | 794 | } |
1115 | | |
1116 | | // Type that is layout-compatible with List to avoid invalid-offsetof warnings. |
1117 | | // Unit tests assert that they have the same layout. |
1118 | | class _DummyDict { |
1119 | | public: |
1120 | | OBJ_HEADER() |
1121 | | int len_; |
1122 | | int capacity_; |
1123 | | void* entry_; |
1124 | | void* keys_; |
1125 | | void* values_; |
1126 | | }; |
1127 | | |
1128 | | // A Dict has three Slab pointers (entry_, keys_, values_) which we need to follow.
1129 | 9 | constexpr uint16_t maskof_Dict() { |
1130 | 9 | return maskbit(offsetof(_DummyDict, entry_)) | |
1131 | 9 | maskbit(offsetof(_DummyDict, keys_)) | |
1132 | 9 | maskbit(offsetof(_DummyDict, values_)); |
1133 | 9 | } |
1134 | | |
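The same maskbit(offsetof(...)) pattern applies to user-defined classes. A hypothetical sketch (Point and _DummyPoint are not part of this header); it mirrors the _DummyDict trick above to avoid invalid-offsetof warnings, and assumes it lives inside namespace gc_heap like the surrounding types:

  // Layout-compatible dummy, mirroring _DummyDict, for offsetof().
  class _DummyPoint {
   public:
    OBJ_HEADER()
    int x_;
    void* name_;
  };

  class Point : public gc_heap::Obj {
   public:
    Point()
        : gc_heap::Obj(Tag::FixedSize, maskbit(offsetof(_DummyPoint, name_)),
                       sizeof(Point)),
          x_(0),
          name_(nullptr) {
    }
    int x_;               // not in the mask; the GC does not trace it
    gc_heap::Str* name_;  // in the mask; the GC follows this pointer
  };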
1135 | | template <class K, class V> |
1136 | | class Dict : public gc_heap::Obj { |
1137 | | public: |
1138 | 9 | Dict() : gc_heap::Obj(Tag::FixedSize, maskof_Dict(), sizeof(Dict)) { |
1139 | 9 | assert(len_ == 0); |
1140 | 0 | assert(capacity_ == 0); |
1141 | 0 | assert(entry_ == nullptr); |
1142 | 0 | assert(keys_ == nullptr); |
1143 | 0 | assert(values_ == nullptr); |
1144 | 9 | }
Instantiations: _ZN7gc_heap4DictIPNS_3StrES2_EC2Ev (1), _ZN7gc_heap4DictIPNS_3StrEiEC2Ev (6), _ZN7gc_heap4DictIiPNS_3StrEEC2Ev (1), plus one instantiation whose name is elided in the report (1)
1145 | | |
1146 | | // This relies on the fact that containers of 4-byte ints are reduced by 2 |
1147 | | // items, which is greater than (or equal to) the reduction of any other |
1148 | | // type |
1149 | | static const int kCapacityAdjust = kSlabHeaderSize / sizeof(int); |
1150 | | static_assert(kSlabHeaderSize % sizeof(int) == 0, |
1151 | | "Slab header size should be multiple of key size"); |
1152 | | |
1153 | 67 | void reserve(int n) { |
1154 | 67 | auto self = this; |
1155 | 67 | Slab<int>* new_i = nullptr; |
1156 | 67 | Slab<K>* new_k = nullptr; |
1157 | 67 | Slab<V>* new_v = nullptr; |
1158 | 67 | StackRoots _roots({&self, &new_i, &new_k, &new_v}); |
1159 | | |
1160 | | // log("--- reserve %d", capacity_); |
1161 | | // |
1162 | 67 | if (self->capacity_ < n) { // TODO: use load factor, not exact fit |
1163 | | // calculate the number of keys and values we should have |
1164 | 14 | self->capacity_ = RoundUp(n + kCapacityAdjust) - kCapacityAdjust; |
1165 | | |
1166 | | // TODO: This is SPARSE. How to compute a size that ensures a decent |
1167 | | // load factor? |
1168 | 14 | int index_len = self->capacity_; |
1169 | 14 | new_i = NewSlab<int>(index_len); |
1170 | | |
1171 | | // For the linear search to work |
1172 | 218 | for (int i = 0; i < index_len; ++i) { |
1173 | 204 | new_i->items_[i] = kEmptyEntry; |
1174 | 204 | } |
1175 | | |
1176 | | // These are DENSE. |
1177 | 14 | new_k = NewSlab<K>(self->capacity_); |
1178 | 14 | new_v = NewSlab<V>(self->capacity_); |
1179 | | |
1180 | 14 | if (self->keys_ != nullptr) { |
1181 | | // Right now the index is the same size as keys and values. |
1182 | 5 | memcpy(new_i->items_, self->entry_->items_, self->len_ * sizeof(int)); |
1183 | | |
1184 | 5 | memcpy(new_k->items_, self->keys_->items_, self->len_ * sizeof(K)); |
1185 | 5 | memcpy(new_v->items_, self->values_->items_, self->len_ * sizeof(V)); |
1186 | 5 | } |
1187 | | |
1188 | 14 | self->entry_ = new_i; |
1189 | 14 | self->keys_ = new_k; |
1190 | 14 | self->values_ = new_v; |
1191 | 14 | } |
1192 | 67 | }
Instantiations: _ZN7gc_heap4DictIiiE7reserveEi (16), _ZN7gc_heap4DictIPNS_3StrES2_E7reserveEi (1), _ZN7gc_heap4DictIPNS_3StrEiE7reserveEi (49), _ZN7gc_heap4DictIiPNS_3StrEE7reserveEi (1)
1193 | | |
1194 | | // d[key] in Python: raises KeyError if not found |
1195 | 21 | V index_(K key) { |
1196 | 21 | int pos = position_of_key(key); |
1197 | 21 | if (pos == -1) { |
1198 | 0 | InvalidCodePath(); // NOTE(Jesse): Should we really crash if asking for a |
1199 | | // key not in a dict? |
1200 | 21 | } else { |
1201 | 21 | return values_->items_[pos]; |
1202 | 21 | } |
1203 | 21 | }
Instantiations: _ZN7gc_heap4DictIiiE6index_Ei (17), _ZN7gc_heap4DictIPNS_3StrES2_E6index_ES2_ (1), _ZN7gc_heap4DictIPNS_3StrEiE6index_ES2_ (3)
Unexecuted instantiation: _ZN7gc_heap4DictIiPNS_3StrEE6index_Ei
1204 | | |
1205 | | // Get a key. |
1206 | | // Returns nullptr if not found (Can't use this for non-pointer types?) |
1207 | 0 | V get(K key) { |
1208 | 0 | int pos = position_of_key(key); |
1209 | 0 | if (pos == -1) { |
1210 | 0 | return nullptr; |
1211 | 0 | } else { |
1212 | 0 | return values_->items_[pos]; |
1213 | 0 | } |
1214 | 0 | } |
1215 | | |
1216 | | // Get a key, but return a default if not found. |
1217 | | // expr_parse.py uses this with OTHER_BALANCE |
1218 | 0 | V get(K key, V default_val) { |
1219 | 0 | int pos = position_of_key(key); |
1220 | 0 | if (pos == -1) { |
1221 | 0 | return default_val; |
1222 | 0 | } else { |
1223 | 0 | return values_->items_[pos]; |
1224 | 0 | } |
1225 | 0 | }
Unexecuted instantiation: _ZN7gc_heap4DictIiPNS_3StrEE3getEiS2_
Unexecuted instantiation: _ZN7gc_heap4DictIPNS_3StrEiE3getES2_i
1226 | | |
1227 | | // Implements d[k] = v. May resize the dictionary. |
1228 | | // |
1229 | | // TODO: Need to specialize this for StackRoots! Gah! |
1230 | | void set(K key, V val); |
1231 | | |
1232 | 0 | List<K>* keys() { |
1233 | 0 | return ListFromDictSlab<K>(entry_, keys_, capacity_); |
1234 | 0 | }
Unexecuted instantiation: _ZN7gc_heap4DictIPNS_3StrEiE4keysEv
Unexecuted instantiation: _ZN7gc_heap4DictIPNS_3StrES2_E4keysEv
1235 | | |
1236 | | // For AssocArray transformations |
1237 | 0 | List<V>* values() { |
1238 | 0 | return ListFromDictSlab<V>(entry_, values_, capacity_); |
1239 | 0 | }
Unexecuted instantiation: _ZN7gc_heap4DictIPNS_3StrEiE6valuesEv
Unexecuted instantiation: _ZN7gc_heap4DictIPNS_3StrES2_E6valuesEv
1240 | | |
1241 | 0 | void clear() { |
1242 | | // Maintain invariant |
1243 | 0 | for (int i = 0; i < capacity_; ++i) { |
1244 | 0 | entry_->items_[i] = kEmptyEntry; |
1245 | 0 | } |
1246 | |
1247 | 0 | memset(keys_->items_, 0, len_ * sizeof(K)); // zero for GC scan |
1248 | 0 | memset(values_->items_, 0, len_ * sizeof(V)); // zero for GC scan |
1249 | 0 | len_ = 0; |
1250 | 0 | } |
1251 | | |
1252 | | // Returns the position in the array. Used by dict_contains(), index(), |
1253 | | // get(), and set(). |
1254 | | // |
1255 | | // For now this does a linear search. |
1256 | | // TODO: |
1257 | | // - hash functions, and linear probing. |
1258 | | // - resizing based on load factor |
1259 | | // - which requires rehashing (re-insert all items) |
1260 | | // - Special case to intern Str* when it's hashed? How? |
1261 | | // - Should we have wrappers like: |
1262 | | // - V GetAndIntern<V>(D, &string_key) |
1263 | | // - SetAndIntern<V>(D, &string_key, value) |
1264 | | // This will enable duplicate copies of the string to be garbage collected |
1265 | 90 | int position_of_key(K key) { |
1266 | 90 | auto self = this; |
1267 | 90 | StackRoots _roots({&self}); |
1268 | | |
1269 | 1.01k | for (int i = 0; i < self->capacity_; ++i) { |
1270 | 1.00k | int special = self->entry_->items_[i]; // NOT an index now |
1271 | 1.00k | if (special == kDeletedEntry) { |
1272 | 0 | continue; // keep searching |
1273 | 0 | } |
1274 | 1.00k | if (special == kEmptyEntry) { |
1275 | 53 | return -1; // not found |
1276 | 53 | } |
1277 | 947 | if (keys_equal(self->keys_->items_[i], key)) { |
1278 | 23 | return i; |
1279 | 23 | } |
1280 | 947 | } |
1281 | 14 | return -1; // table is completely full? Does this happen? |
1282 | 90 | }
Instantiations: _ZN7gc_heap4DictIiiE15position_of_keyEi (34), _ZN7gc_heap4DictIPNS_3StrES2_E15position_of_keyES2_ (2), _ZN7gc_heap4DictIPNS_3StrEiE15position_of_keyES2_ (53), _ZN7gc_heap4DictIiPNS_3StrEE15position_of_keyEi (1)
1283 | | |
1284 | | // int index_size_; // size of index (sparse) |
1285 | | int len_; // number of entries (keys and values, almost dense) |
1286 | | int capacity_; // number of entries before resizing |
1287 | | |
1288 | | // These 3 slabs are resized at the same time. |
1289 | | Slab<int>* entry_; // NOW: kEmptyEntry, kDeletedEntry, or 0. |
1290 | | // LATER: indices which are themselves indexed by
1291 | | // hash value % capacity_
1291 | | // value % capacity_ |
1292 | | Slab<K>* keys_; // Dict<int, V> |
1293 | | Slab<V>* values_; // Dict<K, int> |
1294 | | |
1295 | | DISALLOW_COPY_AND_ASSIGN(Dict) |
1296 | | }; |
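A minimal usage sketch of Dict<K, V> (demo code, not part of the header; assumes an initialized heap):

  void DictDemo() {
    gc_heap::Dict<int, int>* d = nullptr;
    gc_heap::StackRoots _roots({&d});

    d = gc_heap::NewDict<int, int>();
    d->set(1, 100);         // may reserve() and swap in new slabs
    d->set(2, 200);
    int v = d->index_(1);   // d[1]; calls InvalidCodePath() if the key is missing
    int w = d->get(2, -1);  // d.get(2, -1): default if missing
    int n = len(d);         // 2
    (void)v; (void)w; (void)n;
  }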
1297 | | |
1298 | | // "Constructors" that allocate |
1299 | | |
1300 | | template <typename K, typename V> |
1301 | 5 | Dict<K, V>* NewDict() { |
1302 | 5 | auto self = Alloc<Dict<K, V>>(); |
1303 | 5 | return self; |
1304 | 5 | }
Instantiations: _ZN7gc_heap7NewDictIiiEEPNS_4DictIT_T0_EEv (1), _ZN7gc_heap7NewDictIPNS_3StrES2_EEPNS_4DictIT_T0_EEv (1), _ZN7gc_heap7NewDictIPNS_3StrEiEEPNS_4DictIT_T0_EEv (2), _ZN7gc_heap7NewDictIiPNS_3StrEEEPNS_4DictIT_T0_EEv (1)
1305 | | |
1306 | | template <typename K, typename V> |
1307 | | Dict<K, V>* NewDict(std::initializer_list<K> keys, |
1308 | 0 | std::initializer_list<V> values) { |
1309 | 0 | assert(keys.size() == values.size()); |
1310 | 0 | auto self = Alloc<Dict<K, V>>(); |
1311 | 0 | StackRoots _roots({&self}); |
1312 | |
1313 | 0 | auto v = values.begin(); // This simulates a "zip" loop |
1314 | 0 | for (auto key : keys) { |
1315 | 0 | self->set(key, *v); |
1316 | 0 | ++v; |
1317 | 0 | } |
1318 | |
1319 | 0 | return self; |
1320 | 0 | } |
1321 | | |
1322 | | // Four overloads for dict_set()! TODO: Is there a nicer way to do this? |
1323 | | |
1324 | | // e.g. Dict<int, int> |
1325 | | template <typename K, typename V> |
1326 | 16 | void dict_set(Dict<K, V>* self, K key, V val) { |
1327 | 16 | StackRoots _roots({&self}); |
1328 | | |
1329 | 16 | self->reserve(self->len_ + 1); |
1330 | 16 | self->keys_->items_[self->len_] = key; |
1331 | 16 | self->values_->items_[self->len_] = val; |
1332 | | |
1333 | 16 | self->entry_->items_[self->len_] = 0; // new special value |
1334 | | |
1335 | 16 | ++self->len_; |
1336 | 16 | } |
1337 | | |
1338 | | // e.g. Dict<Str*, int> |
1339 | | template <typename K, typename V> |
1340 | 49 | void dict_set(Dict<K*, V>* self, K* key, V val) { |
1341 | 49 | StackRoots _roots({&self, &key}); |
1342 | | |
1343 | 49 | self->reserve(self->len_ + 1); |
1344 | 49 | self->keys_->items_[self->len_] = key; |
1345 | 49 | self->values_->items_[self->len_] = val; |
1346 | | |
1347 | 49 | self->entry_->items_[self->len_] = 0; // new special value |
1348 | | |
1349 | 49 | ++self->len_; |
1350 | 49 | } |
1351 | | |
1352 | | // e.g. Dict<int, Str*> |
1353 | | template <typename K, typename V> |
1354 | 1 | void dict_set(Dict<K, V*>* self, K key, V* val) { |
1355 | 1 | StackRoots _roots({&self, &val}); |
1356 | | |
1357 | 1 | self->reserve(self->len_ + 1); |
1358 | 1 | self->keys_->items_[self->len_] = key; |
1359 | 1 | self->values_->items_[self->len_] = val; |
1360 | | |
1361 | 1 | self->entry_->items_[self->len_] = 0; // new special value |
1362 | | |
1363 | 1 | ++self->len_; |
1364 | 1 | } |
1365 | | |
1366 | | // e.g. Dict<Str*, Str*> |
1367 | | template <typename K, typename V> |
1368 | 1 | void dict_set(Dict<K*, V*>* self, K* key, V* val) { |
1369 | 1 | StackRoots _roots({&self, &key, &val}); |
1370 | | |
1371 | 1 | self->reserve(self->len_ + 1); |
1372 | 1 | self->keys_->items_[self->len_] = key; |
1373 | 1 | self->values_->items_[self->len_] = val; |
1374 | | |
1375 | 1 | self->entry_->items_[self->len_] = 0; // new special value |
1376 | | |
1377 | 1 | ++self->len_; |
1378 | 1 | } |
1379 | | |
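One possible answer to the "four overloads" TODO above, sketched under the assumption that C++17 (if constexpr) is acceptable; this is not what the header does today, and dict_set_generic is a hypothetical name:

  #include <type_traits>  // std::is_pointer

  template <typename K, typename V>
  void dict_set_generic(gc_heap::Dict<K, V>* self, K key, V val) {
    auto insert = [&]() {
      self->reserve(self->len_ + 1);
      self->keys_->items_[self->len_] = key;
      self->values_->items_[self->len_] = val;
      self->entry_->items_[self->len_] = 0;  // new special value
      ++self->len_;
    };
    // Root only the arguments that are themselves heap pointers.
    if constexpr (std::is_pointer<K>::value && std::is_pointer<V>::value) {
      gc_heap::StackRoots _roots({&self, &key, &val});
      insert();
    } else if constexpr (std::is_pointer<K>::value) {
      gc_heap::StackRoots _roots({&self, &key});
      insert();
    } else if constexpr (std::is_pointer<V>::value) {
      gc_heap::StackRoots _roots({&self, &val});
      insert();
    } else {
      gc_heap::StackRoots _roots({&self});
      insert();
    }
  }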
1380 | | template <typename K, typename V> |
1381 | 69 | void Dict<K, V>::set(K key, V val) { |
1382 | 69 | auto self = this; |
1383 | 69 | StackRoots _roots({&self}); // May not need this here? |
1384 | | |
1385 | 69 | int pos = self->position_of_key(key); |
1386 | 69 | if (pos == -1) { // new pair |
1387 | 67 | dict_set(self, key, val); // ALLOCATES |
1388 | 67 | } else { |
1389 | 2 | self->values_->items_[pos] = val; |
1390 | 2 | } |
1391 | 69 | }
Instantiations: _ZN7gc_heap4DictIiiE3setEii (17), _ZN7gc_heap4DictIPNS_3StrES2_E3setES2_S2_ (1), _ZN7gc_heap4DictIPNS_3StrEiE3setES2_i (50), _ZN7gc_heap4DictIiPNS_3StrEE3setEiS2_ (1)
1392 | | |
1393 | | #if GC_DEBUG |
1394 | | void ShowFixedChildren(Obj* obj); |
1395 | | #endif |
1396 | | |
1397 | | #endif // MYLIB_LEAKY |
1398 | | |
1399 | | } // namespace gc_heap |
1400 | | |
1401 | | // |
1402 | | // Functions |
1403 | | // |
1404 | | |
1405 | | #ifndef MYLIB_LEAKY |
1406 | | |
1407 | | // Do some extra calculation to avoid storing redundant lengths. |
1408 | 6.34k | inline int len(const gc_heap::Str* s) { |
1409 | 6.34k | return s->obj_len_ - gc_heap::kStrHeaderSize - 1; |
1410 | 6.34k | } |
1411 | | |
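A worked example of the length calculation above, assuming kStrHeaderSize counts the bytes before the character data and that strings store a trailing NUL (which is what the "- 1" accounts for):

  // For a Str holding "foo":
  //   obj_len_ = kStrHeaderSize + 3 + 1   (3 chars + trailing NUL)
  //   len(s)   = obj_len_ - kStrHeaderSize - 1 = 3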
1412 | | template <typename T> |
1413 | 1.37k | int len(const gc_heap::List<T>* L) { |
1414 | 1.37k | return L->len_; |
1415 | 1.37k | }
Instantiations: _Z3lenIiEiPKN7gc_heap4ListIT_EE (1.31k), _Z3lenIPN7gc_heap3StrEEiPKNS0_4ListIT_EE (60), _Z3lenIP6Tuple2IiPN7gc_heap3StrEEEiPKNS1_4ListIT_EE (1)
Unexecuted instantiation: _Z3lenIbEiPKN7gc_heap4ListIT_EE
Unexecuted instantiation: _Z3lenIdEiPKN7gc_heap4ListIT_EE
1416 | | |
1417 | | template <typename K, typename V> |
1418 | 7 | inline int len(const gc_heap::Dict<K, V>* d) { |
1419 | 7 | return d->len_; |
1420 | 7 | }
Instantiations: _Z3lenIiiEiPKN7gc_heap4DictIT_T0_EE (4), _Z3lenIPN7gc_heap3StrES2_EiPKNS0_4DictIT_T0_EE (2), _Z3lenIPN7gc_heap3StrEiEiPKNS0_4DictIT_T0_EE (1)
Unexecuted instantiation: _Z3lenIiPN7gc_heap3StrEEiPKNS0_4DictIT_T0_EE
1422 | | #endif // MYLIB_LEAKY |
1423 | | |
1424 | | #endif // GC_HEAP_H |