
Coverage Report

Created: 2024-07-28 06:14

/home/uke/oil/mycpp/mark_sweep_heap.cc
  Line|  Count|Source
     1|       |#include "mycpp/mark_sweep_heap.h"
     2|       |
     3|       |#include <inttypes.h>  // PRId64
     4|       |#include <stdlib.h>    // getenv()
     5|       |#include <string.h>    // strlen()
     6|       |#include <sys/time.h>  // gettimeofday()
     7|       |#include <time.h>      // clock_gettime(), CLOCK_PROCESS_CPUTIME_ID
     8|       |#include <unistd.h>    // STDERR_FILENO
     9|       |
    10|       |#include "_build/detected-cpp-config.h"  // for GC_TIMING
    11|       |#include "mycpp/gc_builtins.h"           // StringToInt()
    12|       |#include "mycpp/gc_slab.h"
    13|       |
    14|       |// TODO: Remove this guard when we have separate binaries
    15|       |#if MARK_SWEEP
    16|       |
    17|      9|void MarkSweepHeap::Init() {
    18|      9|  Init(1000);  // collect at 1000 objects in tests
    19|      9|}
    20|       |
    21|      9|void MarkSweepHeap::Init(int gc_threshold) {
    22|      9|  gc_threshold_ = gc_threshold;
    23|       |
    24|      9|  char* e;
    25|      9|  e = getenv("OILS_GC_THRESHOLD");
    26|      9|  if (e) {
    27|      0|    int result;
    28|      0|    if (StringToInt(e, strlen(e), 10, &result)) {
    29|       |      // Override collection threshold
    30|      0|      gc_threshold_ = result;
    31|      0|    }
    32|      0|  }
    33|       |
    34|       |  // only for developers
    35|      9|  e = getenv("_OILS_GC_VERBOSE");
    36|      9|  if (e && strcmp(e, "1") == 0) {
    37|      0|    gc_verbose_ = true;
    38|      0|  }
    39|       |
    40|      9|  live_objs_.reserve(KiB(10));
    41|      9|  roots_.reserve(KiB(1));  // prevent resizing in common case
    42|      9|}
    43|       |
    44|      0|int MarkSweepHeap::MaybeCollect() {
    45|       |  // Maybe collect BEFORE allocation, because the new object won't be rooted
    46|       |  #if GC_ALWAYS
    47|       |  int result = Collect();
    48|       |  #else
    49|      0|  int result = -1;
    50|      0|  if (num_live() > gc_threshold_) {
    51|      0|    result = Collect();
    52|      0|  }
    53|      0|  #endif
    54|       |
    55|      0|  num_gc_points_++;  // this is a manual collection point
    56|      0|  return result;
    57|      0|}
    58|       |
    59|       |  #if defined(BUMP_SMALL)
    60|       |    #include "mycpp/bump_leak_heap.h"
    61|       |
    62|       |BumpLeakHeap gBumpLeak;
    63|       |  #endif
    64|       |
    65|       |// Allocate and update stats
    66|       |// TODO: Make this interface nicer.
    67|    815|void* MarkSweepHeap::Allocate(size_t num_bytes, int* obj_id, int* pool_id) {
    68|       |  // log("Allocate %d", num_bytes);
    69|    815|  #ifndef NO_POOL_ALLOC
    70|    815|  if (num_bytes <= pool1_.kMaxObjSize) {
    71|    443|    *pool_id = 1;
    72|    443|    return pool1_.Allocate(obj_id);
    73|    443|  }
    74|    372|  if (num_bytes <= pool2_.kMaxObjSize) {
    75|    253|    *pool_id = 2;
    76|    253|    return pool2_.Allocate(obj_id);
    77|    253|  }
    78|    119|  *pool_id = 0;  // malloc(), not a pool
    79|    119|  #endif
    80|       |
    81|       |  // Does the pool allocator approximate a bump allocator?  Use pool2_
    82|       |  // threshold of 48 bytes.
    83|       |  // These only work with GC off -- OILS_GC_THRESHOLD=[big]
    84|       |  #ifdef BUMP_SMALL
    85|       |  if (num_bytes <= 48) {
    86|       |    return gBumpLeak.Allocate(num_bytes);
    87|       |  }
    88|       |  #endif
    89|       |
    90|    119|  if (to_free_.empty()) {
    91|       |    // Use higher object IDs
    92|    119|    *obj_id = greatest_obj_id_;
    93|    119|    greatest_obj_id_++;
    94|       |
    95|       |    // This check is ON in release mode
    96|    119|    CHECK(greatest_obj_id_ <= kMaxObjId);
    97|    119|  } else {
    98|      0|    ObjHeader* dead = to_free_.back();
    99|      0|    to_free_.pop_back();
   100|       |
   101|      0|    *obj_id = dead->obj_id;  // reuse the dead object's ID
   102|       |
   103|      0|    free(dead);
   104|      0|  }
   105|       |
   106|      0|  void* result = malloc(num_bytes);
   107|    119|  DCHECK(result != nullptr);
   108|       |
   109|      0|  live_objs_.push_back(static_cast<ObjHeader*>(result));
   110|       |
   111|    119|  num_live_++;
   112|    119|  num_allocated_++;
   113|    119|  bytes_allocated_ += num_bytes;
   114|       |
   115|    119|  return result;
   116|    372|}
   117|       |
   118|       |  #if 0
   119|       |void* MarkSweepHeap::Reallocate(void* p, size_t num_bytes) {
   120|       |  FAIL(kNotImplemented);
   121|       |  // This causes a double-free in the GC!
   122|       |  // return realloc(p, num_bytes);
   123|       |}
   124|       |  #endif
   125|       |
   126|       |// "Leaf" for marking / TraceChildren
   127|       |//
   128|       |// - Abort if nullptr
   129|       |// - Find the header (get rid of this when remove ObjHeader member)
   130|       |// - Tag::{Opaque,FixedSized,Scanned} have their mark bits set
   131|       |// - Tag::{FixedSize,Scanned} are also pushed on the gray stack
   132|       |
   133|      0|void MarkSweepHeap::MaybeMarkAndPush(RawObject* obj) {
   134|      0|  ObjHeader* header = ObjHeader::FromObject(obj);
   135|      0|  if (header->heap_tag == HeapTag::Global) {  // don't mark or push
   136|      0|    return;
   137|      0|  }
   138|       |
   139|      0|  int obj_id = header->obj_id;
   140|      0|  #ifndef NO_POOL_ALLOC
   141|      0|  if (header->pool_id == 1) {
   142|      0|    if (pool1_.IsMarked(obj_id)) {
   143|      0|      return;
   144|      0|    }
   145|      0|    pool1_.Mark(obj_id);
   146|      0|  } else if (header->pool_id == 2) {
   147|      0|    if (pool2_.IsMarked(obj_id)) {
   148|      0|      return;
   149|      0|    }
   150|      0|    pool2_.Mark(obj_id);
   151|      0|  } else
   152|      0|  #endif
   153|      0|  {
   154|      0|    if (mark_set_.IsMarked(obj_id)) {
   155|      0|      return;
   156|      0|    }
   157|      0|    mark_set_.Mark(obj_id);
   158|      0|  }
   159|       |
   160|      0|  switch (header->heap_tag) {
   161|      0|  case HeapTag::Opaque:  // e.g. strings have no children
   162|      0|    break;
   163|       |
   164|      0|  case HeapTag::Scanned:  // these 2 types have children
   165|      0|  case HeapTag::FixedSize:
   166|      0|    gray_stack_.push_back(header);  // Push the header, not the object!
   167|      0|    break;
   168|       |
   169|      0|  default:
   170|      0|    FAIL(kShouldNotGetHere);
   171|      0|  }
   172|      0|}
   173|       |
   174|      9|void MarkSweepHeap::TraceChildren() {
   175|      9|  while (!gray_stack_.empty()) {
   176|      0|    ObjHeader* header = gray_stack_.back();
   177|      0|    gray_stack_.pop_back();
   178|       |
   179|      0|    switch (header->heap_tag) {
   180|      0|    case HeapTag::FixedSize: {
   181|      0|      auto fixed = reinterpret_cast<LayoutFixed*>(header->ObjectAddress());
   182|      0|      int mask = FIELD_MASK(*header);
   183|       |
   184|      0|      for (int i = 0; i < kFieldMaskBits; ++i) {
   185|      0|        if (mask & (1 << i)) {
   186|      0|          RawObject* child = fixed->children_[i];
   187|      0|          if (child) {
   188|      0|            MaybeMarkAndPush(child);
   189|      0|          }
   190|      0|        }
   191|      0|      }
   192|      0|      break;
   193|      0|    }
   194|       |
   195|      0|    case HeapTag::Scanned: {
   196|      0|      auto slab = reinterpret_cast<Slab<RawObject*>*>(header->ObjectAddress());
   197|       |
   198|      0|      int n = NUM_POINTERS(*header);
   199|      0|      for (int i = 0; i < n; ++i) {
   200|      0|        RawObject* child = slab->items_[i];
   201|      0|        if (child) {
   202|      0|          MaybeMarkAndPush(child);
   203|      0|        }
   204|      0|      }
   205|      0|      break;
   206|      0|    }
   207|      0|    default:
   208|       |      // Only FixedSize and Scanned are pushed
   209|      0|      FAIL(kShouldNotGetHere);
   210|      0|    }
   211|      0|  }
   212|      9|}
   213|       |
   214|      9|void MarkSweepHeap::Sweep() {
   215|      9|  #ifndef NO_POOL_ALLOC
   216|      9|  pool1_.Sweep();
   217|      9|  pool2_.Sweep();
   218|      9|  #endif
   219|       |
   220|      9|  int last_live_index = 0;
   221|      9|  int num_objs = live_objs_.size();
   222|    128|  for (int i = 0; i < num_objs; ++i) {
   223|    119|    ObjHeader* obj = live_objs_[i];
   224|    119|    DCHECK(obj);  // malloc() shouldn't have returned nullptr
   225|       |
   226|      0|    bool is_live = mark_set_.IsMarked(obj->obj_id);
   227|       |
   228|       |    // Compact live_objs_ and populate to_free_.  Note: doing the reverse could
   229|       |    // be more efficient when many objects are dead.
   230|    119|    if (is_live) {
   231|      0|      live_objs_[last_live_index++] = obj;
   232|    119|    } else {
   233|    119|      to_free_.push_back(obj);
   234|       |      // free(obj);
   235|    119|      num_live_--;
   236|    119|    }
   237|    119|  }
   238|      9|  live_objs_.resize(last_live_index);  // remove dangling objects
   239|       |
   240|      9|  num_collections_++;
   241|      9|  max_survived_ = std::max(max_survived_, num_live());
   242|      9|}
   243|       |
   244|      9|int MarkSweepHeap::Collect() {
   245|      9|  #ifdef GC_TIMING
   246|      9|  struct timespec start, end;
   247|      9|  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start) < 0) {
   248|      0|    FAIL("clock_gettime failed");
   249|      0|  }
   250|      0|  #endif
   251|       |
   252|      0|  int num_roots = roots_.size();
   253|      9|  int num_globals = global_roots_.size();
   254|       |
   255|      9|  if (gc_verbose_) {
   256|      0|    log("");
   257|      0|    log("%2d. GC with %d roots (%d global) and %d live objects",
   258|      0|        num_collections_, num_roots + num_globals, num_globals, num_live());
   259|      0|  }
   260|       |
   261|       |  // Resize it
   262|      9|  mark_set_.ReInit(greatest_obj_id_);
   263|      9|  #ifndef NO_POOL_ALLOC
   264|      9|  pool1_.PrepareForGc();
   265|      9|  pool2_.PrepareForGc();
   266|      9|  #endif
   267|       |
   268|       |  // Mark roots.
   269|       |  // Note: It might be nice to get rid of double pointers
   270|      9|  for (int i = 0; i < num_roots; ++i) {
   271|      0|    RawObject* root = *(roots_[i]);
   272|      0|    if (root) {
   273|      0|      MaybeMarkAndPush(root);
   274|      0|    }
   275|      0|  }
   276|       |
   277|      9|  for (int i = 0; i < num_globals; ++i) {
   278|      0|    RawObject* root = global_roots_[i];
   279|      0|    if (root) {
   280|      0|      MaybeMarkAndPush(root);
   281|      0|    }
   282|      0|  }
   283|       |
   284|       |  // Traverse object graph.
   285|      9|  TraceChildren();
   286|       |
   287|      9|  Sweep();
   288|       |
   289|      9|  if (gc_verbose_) {
   290|      0|    log("    %d live after sweep", num_live());
   291|      0|  }
   292|       |
   293|       |  // We know how many are live.  If the number of objects is close to the
   294|       |  // threshold (above 75%), then set the threshold to 2 times the number of
   295|       |  // live objects.  This is an ad hoc policy that removes observed "thrashing"
   296|       |  // -- being at 99% of the threshold and doing FUTILE mark and sweep.
   297|       |
   298|      9|  int water_mark = (gc_threshold_ * 3) / 4;
   299|      9|  if (num_live() > water_mark) {
   300|      0|    gc_threshold_ = num_live() * 2;
   301|      0|    num_growths_++;
   302|      0|    if (gc_verbose_) {
   303|      0|      log("    exceeded %d live objects; gc_threshold set to %d", water_mark,
   304|      0|          gc_threshold_);
   305|      0|    }
   306|      0|  }
   307|       |
   308|      9|  #ifdef GC_TIMING
   309|      9|  if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end) < 0) {
   310|      0|    FAIL("clock_gettime failed");
   311|      0|  }
   312|       |
   313|      0|  double start_secs = start.tv_sec + start.tv_nsec / 1e9;
   314|      9|  double end_secs = end.tv_sec + end.tv_nsec / 1e9;
   315|      9|  double gc_millis = (end_secs - start_secs) * 1000.0;
   316|       |
   317|      9|  if (gc_verbose_) {
   318|      0|    log("    %.1f ms GC", gc_millis);
   319|      0|  }
   320|       |
   321|      9|  total_gc_millis_ += gc_millis;
   322|      9|  if (gc_millis > max_gc_millis_) {
   323|      9|    max_gc_millis_ = gc_millis;
   324|      9|  }
   325|      9|  #endif
   326|       |
   327|      9|  return num_live();  // for unit tests only
   328|      9|}
   329|       |
   330|      0|void MarkSweepHeap::PrintStats(int fd) {
   331|      0|  dprintf(fd, "  num live         = %10d\n", num_live());
   332|       |  // max survived_ can be less than num_live(), because leave off the last GC
   333|      0|  dprintf(fd, "  max survived     = %10d\n", max_survived_);
   334|      0|  dprintf(fd, "\n");
   335|       |
   336|      0|  #ifndef NO_POOL_ALLOC
   337|      0|  dprintf(fd, "  num allocated    = %10d\n",
   338|      0|          num_allocated_ + pool1_.num_allocated() + pool2_.num_allocated());
   339|      0|  dprintf(fd, "  num in heap      = %10d\n", num_allocated_);
   340|       |  #else
   341|       |  dprintf(fd, "  num allocated    = %10d\n", num_allocated_);
   342|       |  #endif
   343|       |
   344|      0|  #ifndef NO_POOL_ALLOC
   345|      0|  dprintf(fd, "  num in pool 1    = %10d\n", pool1_.num_allocated());
   346|      0|  dprintf(fd, "  num in pool 2    = %10d\n", pool2_.num_allocated());
   347|      0|  dprintf(
   348|      0|      fd, "bytes allocated    = %10" PRId64 "\n",
   349|      0|      bytes_allocated_ + pool1_.bytes_allocated() + pool2_.bytes_allocated());
   350|       |  #else
   351|       |  dprintf(fd, "bytes allocated    = %10" PRId64 "\n", bytes_allocated_);
   352|       |  #endif
   353|       |
   354|      0|  dprintf(fd, "\n");
   355|      0|  dprintf(fd, "  num gc points    = %10d\n", num_gc_points_);
   356|      0|  dprintf(fd, "  num collections  = %10d\n", num_collections_);
   357|      0|  dprintf(fd, "\n");
   358|      0|  dprintf(fd, "   gc threshold    = %10d\n", gc_threshold_);
   359|      0|  dprintf(fd, "  num growths      = %10d\n", num_growths_);
   360|      0|  dprintf(fd, "\n");
   361|      0|  dprintf(fd, "  max gc millis    = %10.1f\n", max_gc_millis_);
   362|      0|  dprintf(fd, "total gc millis    = %10.1f\n", total_gc_millis_);
   363|      0|  dprintf(fd, "\n");
   364|      0|  dprintf(fd, "roots capacity     = %10d\n",
   365|      0|          static_cast<int>(roots_.capacity()));
   366|      0|  dprintf(fd, " objs capacity     = %10d\n",
   367|      0|          static_cast<int>(live_objs_.capacity()));
   368|      0|}
   369|       |
   370|       |// Cleanup at the end of main() to remain ASAN-safe
   371|      9|void MarkSweepHeap::MaybePrintStats() {
   372|      9|  int stats_fd = -1;
   373|      9|  char* e = getenv("OILS_GC_STATS");
   374|      9|  if (e && strlen(e)) {  // env var set and non-empty
   375|      0|    stats_fd = STDERR_FILENO;
   376|      9|  } else {
   377|       |    // A raw file descriptor lets benchmarks extract stats even if the script
   378|       |    // writes to stdout and stderr.  Shells can't use open() without potential
   379|       |    // conflicts.
   380|       |
   381|      9|    e = getenv("OILS_GC_STATS_FD");
   382|      9|    if (e && strlen(e)) {
   383|       |      // Try setting 'stats_fd'.  If there's an error, it will be unchanged, and
   384|       |      // we don't PrintStats();
   385|      0|      StringToInt(e, strlen(e), 10, &stats_fd);
   386|      0|    }
   387|      9|  }
   388|       |
   389|      9|  if (stats_fd != -1) {
   390|      0|    PrintStats(stats_fd);
   391|      0|  }
   392|      9|}
   393|       |
   394|      9|void MarkSweepHeap::FreeEverything() {
   395|      9|  roots_.clear();
   396|      9|  global_roots_.clear();
   397|       |
   398|      9|  Collect();
   399|       |
   400|       |  // Collect() told us what to free()
   401|    119|  for (auto obj : to_free_) {
   402|    119|    free(obj);
   403|    119|  }
   404|      9|  #ifndef NO_POOL_ALLOC
   405|      9|  pool1_.Free();
   406|      9|  pool2_.Free();
   407|      9|  #endif
   408|      9|}
   409|       |
   410|      9|void MarkSweepHeap::CleanProcessExit() {
   411|      9|  char* e = getenv("OILS_GC_ON_EXIT");
   412|       |  // collect by default; OILS_GC_ON_EXIT=0 overrides
   413|      9|  if (e && strcmp(e, "0") == 0) {
   414|      0|    ;
   415|      9|  } else {
   416|      9|    FreeEverything();
   417|      9|  }
   418|      9|  MaybePrintStats();
   419|      9|}
   420|       |
   421|       |// for the main binary
   422|      0|void MarkSweepHeap::ProcessExit() {
   423|       |  #ifdef CLEAN_PROCESS_EXIT
   424|       |  FreeEverything();
   425|       |  #else
   426|      0|  char* e = getenv("OILS_GC_ON_EXIT");
   427|       |  // don't collect by default; OILS_GC_ON_EXIT=1 overrides
   428|      0|  if (e && strcmp(e, "1") == 0) {
   429|      0|    FreeEverything();
   430|      0|  }
   431|      0|  #endif
   432|       |
   433|      0|  MaybePrintStats();
   434|      0|}
   435|       |
   436|       |MarkSweepHeap gHeap;
   437|       |
   438|       |#endif  // MARK_SWEEP
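
The comment at source lines 293-296 describes the threshold-growth policy that Collect() applies after each sweep. A small worked example makes the arithmetic concrete; this is a standalone sketch using only the expressions visible in the report, with illustrative numbers that are not taken from this run.

```cpp
// Standalone sketch of the ad hoc threshold-growth policy shown in Collect()
// (source lines 298-306 above).  Values are illustrative, not from the report.
#include <stdio.h>

int main() {
  int gc_threshold = 1000;  // default threshold, as in Init(1000)
  int num_live = 800;       // suppose 800 objects survive a collection

  int water_mark = (gc_threshold * 3) / 4;  // 750, i.e. 75% of the threshold
  if (num_live > water_mark) {
    // Survivors are close to the threshold, so the next collection would be
    // nearly futile; double the headroom instead of thrashing at ~99%.
    gc_threshold = num_live * 2;  // 1600
  }
  printf("water_mark = %d, new gc_threshold = %d\n", water_mark, gc_threshold);
  return 0;
}
```

With these numbers the program prints `water_mark = 750, new gc_threshold = 1600`, which is the "set the threshold to 2 times the number of live objects" behavior the comment describes.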
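Many of the 0-count lines above sit behind environment variables (OILS_GC_THRESHOLD, _OILS_GC_VERBOSE, OILS_GC_STATS) or behind MaybeCollect(), which this run never calls. The sketch below shows one way a test driver could exercise those branches. It is a hypothetical example, not part of the Oils test suite: it assumes a binary that links mark_sweep_heap.cc, that MarkSweepHeap is declared in mycpp/mark_sweep_heap.h (as the #include at the top of the file suggests), and that the listed methods are callable from a test.

```cpp
// Hypothetical driver for the env-var branches left uncovered in this report.
// Assumes it links against mark_sweep_heap.cc; names are taken from the source above.
#include <stdlib.h>  // setenv()

#include "mycpp/mark_sweep_heap.h"

int main() {
  setenv("OILS_GC_THRESHOLD", "50", 1);  // source lines 27-32: threshold override
  setenv("_OILS_GC_VERBOSE", "1", 1);    // source lines 37-38: verbose GC logging
  setenv("OILS_GC_STATS", "1", 1);       // source line 375: stats to stderr on exit

  MarkSweepHeap heap;
  heap.Init();              // reads OILS_GC_THRESHOLD and _OILS_GC_VERBOSE
  heap.MaybeCollect();      // source lines 44-57: the manual collection point
  heap.CleanProcessExit();  // frees everything, then prints stats via OILS_GC_STATS
  return 0;
}
```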