mycpp

Coverage Report

Created: 2024-06-09 06:28

/home/uke/oil/mycpp/mark_sweep_heap.cc
Line|  Count|Source
   1|       |#include "mycpp/mark_sweep_heap.h"
   2|       |
   3|       |#include <inttypes.h>  // PRId64
   4|       |#include <stdlib.h>    // getenv()
   5|       |#include <string.h>    // strlen()
   6|       |#include <sys/time.h>  // gettimeofday()
   7|       |#include <time.h>      // clock_gettime(), CLOCK_PROCESS_CPUTIME_ID
   8|       |#include <unistd.h>    // STDERR_FILENO
   9|       |
  10|       |#include "_build/detected-cpp-config.h"  // for GC_TIMING
  11|       |#include "mycpp/gc_builtins.h"           // StringToInt()
  12|       |#include "mycpp/gc_slab.h"
  13|       |
  14|       |// TODO: Remove this guard when we have separate binaries
  15|       |#if MARK_SWEEP
  16|       |
  17|     17|void MarkSweepHeap::Init() {
  18|     17|  Init(1000);  // collect at 1000 objects in tests
  19|     17|}
  20|       |
  21|     18|void MarkSweepHeap::Init(int gc_threshold) {
  22|     18|  gc_threshold_ = gc_threshold;
  23|       |
  24|     18|  char* e;
  25|     18|  e = getenv("OILS_GC_THRESHOLD");
  26|     18|  if (e) {
  27|      0|    int result;
  28|      0|    if (StringToInt(e, strlen(e), 10, &result)) {
  29|       |      // Override collection threshold
  30|      0|      gc_threshold_ = result;
  31|      0|    }
  32|      0|  }
  33|       |
  34|       |  // only for developers
  35|     18|  e = getenv("_OILS_GC_VERBOSE");
  36|     18|  if (e && strcmp(e, "1") == 0) {
  37|      0|    gc_verbose_ = true;
  38|      0|  }
  39|       |
  40|     18|  live_objs_.reserve(KiB(10));
  41|     18|  roots_.reserve(KiB(1));  // prevent resizing in common case
  42|     18|}
  43|       |
  44|      2|int MarkSweepHeap::MaybeCollect() {
  45|       |  // Maybe collect BEFORE allocation, because the new object won't be rooted
  46|       |  #if GC_ALWAYS
  47|       |  int result = Collect();
  48|       |  #else
  49|      2|  int result = -1;
  50|      2|  if (num_live() > gc_threshold_) {
  51|      0|    result = Collect();
  52|      0|  }
  53|      2|  #endif
  54|       |
  55|      2|  num_gc_points_++;  // this is a manual collection point
  56|      2|  return result;
  57|      2|}
  58|       |
  59|       |  #if defined(BUMP_SMALL)
  60|       |    #include "mycpp/bump_leak_heap.h"
  61|       |
  62|       |BumpLeakHeap gBumpLeak;
  63|       |  #endif
  64|       |
  65|       |// Allocate and update stats
  66|       |// TODO: Make this interface nicer.
  67|  2.91k|void* MarkSweepHeap::Allocate(size_t num_bytes, int* obj_id, int* pool_id) {
  68|       |  // log("Allocate %d", num_bytes);
  69|  2.91k|  #ifndef NO_POOL_ALLOC
  70|  2.91k|  if (num_bytes <= pool1_.kMaxObjSize) {
  71|  1.66k|    *pool_id = 1;
  72|  1.66k|    return pool1_.Allocate(obj_id);
  73|  1.66k|  }
  74|  1.25k|  if (num_bytes <= pool2_.kMaxObjSize) {
  75|    714|    *pool_id = 2;
  76|    714|    return pool2_.Allocate(obj_id);
  77|    714|  }
  78|    540|  *pool_id = 0;  // malloc(), not a pool
  79|    540|  #endif
  80|       |
  81|       |  // Does the pool allocator approximate a bump allocator?  Use pool2_
  82|       |  // threshold of 48 bytes.
  83|       |  // These only work with GC off -- OILS_GC_THRESHOLD=[big]
  84|       |  #ifdef BUMP_SMALL
  85|       |  if (num_bytes <= 48) {
  86|       |    return gBumpLeak.Allocate(num_bytes);
  87|       |  }
  88|       |  #endif
  89|       |
  90|    540|  if (to_free_.empty()) {
  91|       |    // Use higher object IDs
  92|    540|    *obj_id = greatest_obj_id_;
  93|    540|    greatest_obj_id_++;
  94|       |
  95|       |    // This check is ON in release mode
  96|    540|    CHECK(greatest_obj_id_ <= kMaxObjId);
  97|    540|  } else {
  98|      0|    ObjHeader* dead = to_free_.back();
  99|      0|    to_free_.pop_back();
 100|       |
 101|      0|    *obj_id = dead->obj_id;  // reuse the dead object's ID
 102|       |
 103|      0|    free(dead);
 104|      0|  }
 105|       |
 106|      0|  void* result = malloc(num_bytes);
 107|    540|  DCHECK(result != nullptr);
 108|       |
 109|      0|  live_objs_.push_back(static_cast<ObjHeader*>(result));
 110|       |
 111|    540|  num_live_++;
 112|    540|  num_allocated_++;
 113|    540|  bytes_allocated_ += num_bytes;
 114|       |
 115|    540|  return result;
 116|  1.25k|}
 117|       |
 118|       |  #if 0
 119|       |void* MarkSweepHeap::Reallocate(void* p, size_t num_bytes) {
 120|       |  FAIL(kNotImplemented);
 121|       |  // This causes a double-free in the GC!
 122|       |  // return realloc(p, num_bytes);
 123|       |}
 124|       |  #endif
 125|       |
 126|       |// "Leaf" for marking / TraceChildren
 127|       |//
 128|       |// - Abort if nullptr
 129|       |// - Find the header (get rid of this when we remove the ObjHeader member)
 130|       |// - Tag::{Opaque,FixedSize,Scanned} have their mark bits set
 131|       |// - Tag::{FixedSize,Scanned} are also pushed on the gray stack
 132|       |
 133|     65|void MarkSweepHeap::MaybeMarkAndPush(RawObject* obj) {
 134|     65|  ObjHeader* header = ObjHeader::FromObject(obj);
 135|     65|  if (header->heap_tag == HeapTag::Global) {  // don't mark or push
 136|      4|    return;
 137|      4|  }
 138|       |
 139|     61|  int obj_id = header->obj_id;
 140|     61|  #ifndef NO_POOL_ALLOC
 141|     61|  if (header->pool_id == 1) {
 142|     56|    if (pool1_.IsMarked(obj_id)) {
 143|     14|      return;
 144|     14|    }
 145|     42|    pool1_.Mark(obj_id);
 146|     42|  } else if (header->pool_id == 2) {
 147|      5|    if (pool2_.IsMarked(obj_id)) {
 148|      0|      return;
 149|      0|    }
 150|      5|    pool2_.Mark(obj_id);
 151|      5|  } else
 152|      0|  #endif
 153|      0|  {
 154|      0|    if (mark_set_.IsMarked(obj_id)) {
 155|      0|      return;
 156|      0|    }
 157|      0|    mark_set_.Mark(obj_id);
 158|      0|  }
 159|       |
 160|     47|  switch (header->heap_tag) {
 161|     16|  case HeapTag::Opaque:  // e.g. strings have no children
 162|     16|    break;
 163|       |
 164|      4|  case HeapTag::Scanned:  // these 2 types have children
 165|     31|  case HeapTag::FixedSize:
 166|     31|    gray_stack_.push_back(header);  // Push the header, not the object!
 167|     31|    break;
 168|       |
 169|      0|  default:
 170|      0|    FAIL(kShouldNotGetHere);
 171|     47|  }
 172|     47|}
 173|       |
 174|     38|void MarkSweepHeap::TraceChildren() {
 175|     69|  while (!gray_stack_.empty()) {
 176|     31|    ObjHeader* header = gray_stack_.back();
 177|     31|    gray_stack_.pop_back();
 178|       |
 179|     31|    switch (header->heap_tag) {
 180|     27|    case HeapTag::FixedSize: {
 181|     27|      auto fixed = reinterpret_cast<LayoutFixed*>(header->ObjectAddress());
 182|     27|      int mask = FIELD_MASK(*header);
 183|       |
 184|    675|      for (int i = 0; i < kFieldMaskBits; ++i) {
 185|    648|        if (mask & (1 << i)) {
 186|     21|          RawObject* child = fixed->children_[i];
 187|     21|          if (child) {
 188|     18|            MaybeMarkAndPush(child);
 189|     18|          }
 190|     21|        }
 191|    648|      }
 192|     27|      break;
 193|      0|    }
 194|       |
 195|      4|    case HeapTag::Scanned: {
 196|      4|      auto slab = reinterpret_cast<Slab<RawObject*>*>(header->ObjectAddress());
 197|       |
 198|      4|      int n = NUM_POINTERS(*header);
 199|     24|      for (int i = 0; i < n; ++i) {
 200|     20|        RawObject* child = slab->items_[i];
 201|     20|        if (child) {
 202|      5|          MaybeMarkAndPush(child);
 203|      5|        }
 204|     20|      }
 205|      4|      break;
 206|      0|    }
 207|      0|    default:
 208|       |      // Only FixedSize and Scanned are pushed
 209|      0|      FAIL(kShouldNotGetHere);
 210|     31|    }
 211|     31|  }
 212|     38|}
 213|       |
 214|     38|void MarkSweepHeap::Sweep() {
 215|     38|  #ifndef NO_POOL_ALLOC
 216|     38|  pool1_.Sweep();
 217|     38|  pool2_.Sweep();
 218|     38|  #endif
 219|       |
 220|     38|  int last_live_index = 0;
 221|     38|  int num_objs = live_objs_.size();
 222|    578|  for (int i = 0; i < num_objs; ++i) {
 223|    540|    ObjHeader* obj = live_objs_[i];
 224|    540|    DCHECK(obj);  // malloc() shouldn't have returned nullptr
 225|       |
 226|      0|    bool is_live = mark_set_.IsMarked(obj->obj_id);
 227|       |
 228|       |    // Compact live_objs_ and populate to_free_.  Note: doing the reverse could
 229|       |    // be more efficient when many objects are dead.
 230|    540|    if (is_live) {
 231|      0|      live_objs_[last_live_index++] = obj;
 232|    540|    } else {
 233|    540|      to_free_.push_back(obj);
 234|       |      // free(obj);
 235|    540|      num_live_--;
 236|    540|    }
 237|    540|  }
 238|     38|  live_objs_.resize(last_live_index);  // remove dangling objects
 239|       |
 240|     38|  num_collections_++;
 241|     38|  max_survived_ = std::max(max_survived_, num_live());
 242|     38|}
 243|       |
 244|     38|int MarkSweepHeap::Collect() {
 245|     38|  #ifdef GC_TIMING
 246|     38|  struct timespec start, end;
 247|     38|  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start) < 0) {
 248|      0|    FAIL("clock_gettime failed");
 249|      0|  }
 250|      0|  #endif
 251|       |
 252|      0|  int num_roots = roots_.size();
 253|     38|  int num_globals = global_roots_.size();
 254|       |
 255|     38|  if (gc_verbose_) {
 256|      0|    log("");
 257|      0|    log("%2d. GC with %d roots (%d global) and %d live objects",
 258|      0|        num_collections_, num_roots + num_globals, num_globals, num_live());
 259|      0|  }
 260|       |
 261|       |  // Resize the mark set
 262|     38|  mark_set_.ReInit(greatest_obj_id_);
 263|     38|  #ifndef NO_POOL_ALLOC
 264|     38|  pool1_.PrepareForGc();
 265|     38|  pool2_.PrepareForGc();
 266|     38|  #endif
 267|       |
 268|       |  // Mark roots.
 269|       |  // Note: It might be nice to get rid of double pointers
 270|    105|  for (int i = 0; i < num_roots; ++i) {
 271|     67|    RawObject* root = *(roots_[i]);
 272|     67|    if (root) {
 273|     42|      MaybeMarkAndPush(root);
 274|     42|    }
 275|     67|  }
 276|       |
 277|     38|  for (int i = 0; i < num_globals; ++i) {
 278|      0|    RawObject* root = global_roots_[i];
 279|      0|    if (root) {
 280|      0|      MaybeMarkAndPush(root);
 281|      0|    }
 282|      0|  }
 283|       |
 284|       |  // Traverse object graph.
 285|     38|  TraceChildren();
 286|       |
 287|     38|  Sweep();
 288|       |
 289|     38|  if (gc_verbose_) {
 290|      0|    log("    %d live after sweep", num_live());
 291|      0|  }
 292|       |
 293|       |  // We know how many are live.  If the number of objects is close to the
 294|       |  // threshold (above 75%), then set the threshold to 2 times the number of
 295|       |  // live objects.  This is an ad hoc policy that removes observed "thrashing"
 296|       |  // -- being at 99% of the threshold and doing FUTILE mark and sweep.
 297|       |
 298|     38|  int water_mark = (gc_threshold_ * 3) / 4;
 299|     38|  if (num_live() > water_mark) {
 300|      0|    gc_threshold_ = num_live() * 2;
 301|      0|    num_growths_++;
 302|      0|    if (gc_verbose_) {
 303|      0|      log("    exceeded %d live objects; gc_threshold set to %d", water_mark,
 304|      0|          gc_threshold_);
 305|      0|    }
 306|      0|  }
 307|       |
 308|     38|  #ifdef GC_TIMING
 309|     38|  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end) < 0) {
 310|      0|    FAIL("clock_gettime failed");
 311|      0|  }
 312|       |
 313|      0|  double start_secs = start.tv_sec + start.tv_nsec / 1e9;
 314|     38|  double end_secs = end.tv_sec + end.tv_nsec / 1e9;
 315|     38|  double gc_millis = (end_secs - start_secs) * 1000.0;
 316|       |
 317|     38|  if (gc_verbose_) {
 318|      0|    log("    %.1f ms GC", gc_millis);
 319|      0|  }
 320|       |
 321|     38|  total_gc_millis_ += gc_millis;
 322|     38|  if (gc_millis > max_gc_millis_) {
 323|     12|    max_gc_millis_ = gc_millis;
 324|     12|  }
 325|     38|  #endif
 326|       |
 327|     38|  return num_live();  // for unit tests only
 328|     38|}
 329|       |
 330|      3|void MarkSweepHeap::PrintStats(int fd) {
 331|      3|  dprintf(fd, "  num live         = %10d\n", num_live());
 332|       |  // max_survived_ can be less than num_live(), because we leave off the last GC
 333|      3|  dprintf(fd, "  max survived     = %10d\n", max_survived_);
 334|      3|  dprintf(fd, "\n");
 335|       |
 336|      3|  #ifndef NO_POOL_ALLOC
 337|      3|  dprintf(fd, "  num allocated    = %10d\n",
 338|      3|          num_allocated_ + pool1_.num_allocated() + pool2_.num_allocated());
 339|      3|  dprintf(fd, "  num in heap      = %10d\n", num_allocated_);
 340|       |  #else
 341|       |  dprintf(fd, "  num allocated    = %10d\n", num_allocated_);
 342|       |  #endif
 343|       |
 344|      3|  #ifndef NO_POOL_ALLOC
 345|      3|  dprintf(fd, "  num in pool 1    = %10d\n", pool1_.num_allocated());
 346|      3|  dprintf(fd, "  num in pool 2    = %10d\n", pool2_.num_allocated());
 347|      3|  dprintf(
 348|      3|      fd, "bytes allocated    = %10" PRId64 "\n",
 349|      3|      bytes_allocated_ + pool1_.bytes_allocated() + pool2_.bytes_allocated());
 350|       |  #else
 351|       |  dprintf(fd, "bytes allocated    = %10" PRId64 "\n", bytes_allocated_);
 352|       |  #endif
 353|       |
 354|      3|  dprintf(fd, "\n");
 355|      3|  dprintf(fd, "  num gc points    = %10d\n", num_gc_points_);
 356|      3|  dprintf(fd, "  num collections  = %10d\n", num_collections_);
 357|      3|  dprintf(fd, "\n");
 358|      3|  dprintf(fd, "   gc threshold    = %10d\n", gc_threshold_);
 359|      3|  dprintf(fd, "  num growths      = %10d\n", num_growths_);
 360|      3|  dprintf(fd, "\n");
 361|      3|  dprintf(fd, "  max gc millis    = %10.1f\n", max_gc_millis_);
 362|      3|  dprintf(fd, "total gc millis    = %10.1f\n", total_gc_millis_);
 363|      3|  dprintf(fd, "\n");
 364|      3|  dprintf(fd, "roots capacity     = %10d\n",
 365|      3|          static_cast<int>(roots_.capacity()));
 366|      3|  dprintf(fd, " objs capacity     = %10d\n",
 367|      3|          static_cast<int>(live_objs_.capacity()));
 368|      3|}
 369|       |
 370|       |// Cleanup at the end of main() to remain ASAN-safe
 371|     11|void MarkSweepHeap::MaybePrintStats() {
 372|     11|  int stats_fd = -1;
 373|     11|  char* e = getenv("OILS_GC_STATS");
 374|     11|  if (e && strlen(e)) {  // env var set and non-empty
 375|      0|    stats_fd = STDERR_FILENO;
 376|     11|  } else {
 377|       |    // A raw file descriptor lets benchmarks extract stats even if the script
 378|       |    // writes to stdout and stderr.  Shells can't use open() without potential
 379|       |    // conflicts.
 380|       |
 381|     11|    e = getenv("OILS_GC_STATS_FD");
 382|     11|    if (e && strlen(e)) {
 383|       |      // Try setting 'stats_fd'.  If there's an error, it stays unchanged, and
 384|       |      // we don't call PrintStats().
 385|      0|      StringToInt(e, strlen(e), 10, &stats_fd);
 386|      0|    }
 387|     11|  }
 388|       |
 389|     11|  if (stats_fd != -1) {
 390|      0|    PrintStats(stats_fd);
 391|      0|  }
 392|     11|}
 393|       |
 394|     10|void MarkSweepHeap::FreeEverything() {
 395|     10|  roots_.clear();
 396|     10|  global_roots_.clear();
 397|       |
 398|     10|  Collect();
 399|       |
 400|       |  // Collect() told us what to free()
 401|    540|  for (auto obj : to_free_) {
 402|    540|    free(obj);
 403|    540|  }
 404|     10|  #ifndef NO_POOL_ALLOC
 405|     10|  pool1_.Free();
 406|     10|  pool2_.Free();
 407|     10|  #endif
 408|     10|}
 409|       |
 410|     10|void MarkSweepHeap::CleanProcessExit() {
 411|     10|  char* e = getenv("OILS_GC_ON_EXIT");
 412|       |  // collect by default; OILS_GC_ON_EXIT=0 overrides
 413|     10|  if (e && strcmp(e, "0") == 0) {
 414|      0|    ;
 415|     10|  } else {
 416|     10|    FreeEverything();
 417|     10|  }
 418|     10|  MaybePrintStats();
 419|     10|}
 420|       |
 421|       |// for the main binary
 422|      1|void MarkSweepHeap::ProcessExit() {
 423|       |  #ifdef CLEAN_PROCESS_EXIT
 424|       |  FreeEverything();
 425|       |  #else
 426|      1|  char* e = getenv("OILS_GC_ON_EXIT");
 427|       |  // don't collect by default; OILS_GC_ON_EXIT=1 overrides
 428|      1|  if (e && strcmp(e, "1") == 0) {
 429|      0|    FreeEverything();
 430|      0|  }
 431|      1|  #endif
 432|       |
 433|      1|  MaybePrintStats();
 434|      1|}
 435|       |
 436|       |MarkSweepHeap gHeap;
 437|       |
 438|       |#endif  // MARK_SWEEP
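
The comment at lines 293-296 describes the collection-threshold growth policy implemented at lines 298-306. Restated as a standalone function, purely for illustration: the function name below is invented, but the arithmetic mirrors the listing.

// Growth policy from Collect() (lines 298-306), restated for illustration only.
// If more than 75% of the threshold is still live after a collection,
// collecting again soon would be futile, so the threshold doubles relative
// to the live count.
int NextGcThreshold(int gc_threshold, int num_live) {
  int water_mark = (gc_threshold * 3) / 4;  // 75% of the current threshold
  if (num_live > water_mark) {
    return num_live * 2;  // e.g. threshold 1000, 800 live -> new threshold 1600
  }
  return gc_threshold;  // plenty of headroom; keep the current threshold
}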
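
Several branches above report a count of 0 only because the corresponding environment variables were unset during the instrumented run: OILS_GC_THRESHOLD (lines 27-32), _OILS_GC_VERBOSE (lines 37-38), OILS_GC_STATS and OILS_GC_STATS_FD (lines 375 and 385-386), and OILS_GC_ON_EXIT (lines 414 and 429-430). The sketch below shows one way two of those branches could be exercised from a test. It is a minimal illustration, not part of the test suite: the test program and the chosen values are hypothetical, while setenv() is POSIX and the MarkSweepHeap methods are the ones in the listing.

// Hypothetical coverage-oriented test sketch -- not part of mark_sweep_heap.cc.
// Assumes the mycpp headers are on the include path.
#include <stdlib.h>  // setenv()

#include "mycpp/mark_sweep_heap.h"

int main() {
  // Covers lines 27-32: Init(int) reads OILS_GC_THRESHOLD and overrides the
  // default threshold of 1000 passed by Init().
  setenv("OILS_GC_THRESHOLD", "5000", 1 /* overwrite */);

  // Covers lines 37-38: verbose GC logging is enabled only when
  // _OILS_GC_VERBOSE is exactly "1".
  setenv("_OILS_GC_VERBOSE", "1", 1);

  MarkSweepHeap heap;
  heap.Init();  // calls Init(1000), which consults both variables

  return 0;
}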