OILS / core / executor.py View on Github | oilshell.org

685 lines, 367 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import state
25from core import ui
26from core import vm
27from frontend import consts
28from frontend import lexer
29from mycpp.mylib import log
30
31import posix_ as posix
32
33from typing import cast, Dict, List, Optional, TYPE_CHECKING
34if TYPE_CHECKING:
35 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
36 StatusArray)
37 from _devbuild.gen.syntax_asdl import command_t
38 from builtin import trap_osh
39 from core import optview
40 from core import state
41 from core.vm import _Builtin
42
_ = log  # silence 'unused import' warnings; log() is kept for ad-hoc debugging
44
45
46class _ProcessSubFrame(object):
47 """To keep track of diff <(cat 1) <(cat 2) > >(tac)"""
48
49 def __init__(self):
50 # type: () -> None
51
52 # These objects appear unconditionally in the main loop, and aren't
53 # commonly used, so we manually optimize [] into None.
54
55 self._to_wait = [] # type: List[process.Process]
56 self._to_close = [] # type: List[int] # file descriptors
57 self._locs = [] # type: List[loc_t]
58 self._modified = False
59
60 def WasModified(self):
61 # type: () -> bool
62 return self._modified
63
64 def Append(self, p, fd, status_loc):
65 # type: (process.Process, int, loc_t) -> None
66 self._modified = True
67
68 self._to_wait.append(p)
69 self._to_close.append(fd)
70 self._locs.append(status_loc)
71
72 def MaybeWaitOnProcessSubs(self, waiter, status_array):
73 # type: (process.Waiter, StatusArray) -> None
74
75 # Wait in the same order that they were evaluated. That seems fine.
76 for fd in self._to_close:
77 posix.close(fd)
78
79 codes = [] # type: List[int]
80 locs = [] # type: List[loc_t]
81 for i, p in enumerate(self._to_wait):
82 #log('waiting for %s', p)
83 st = p.Wait(waiter)
84 codes.append(st)
85 locs.append(self._locs[i])
86
87 status_array.codes = codes
88 status_array.locs = locs
89
90
# Bit flags for RunSimpleCommand's run_flags argument.  Powers of two so they
# can be OR'd together; bit 0 is currently unused.
DO_FORK = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses function lookup
USE_DEFAULT_PATH = 1 << 3  # for 'command -p ls', which changes the path

# Fallback $PATH for USE_DEFAULT_PATH.  Copied from var.c in dash.
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
101
102
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter.

    Dispatches simple commands to builtins, procs, or external programs, and
    implements pipelines, background jobs, subshells, command subs, and
    process subs on top of core/process.py.
    """
106
107 def __init__(
108 self,
109 mem, # type: state.Mem
110 exec_opts, # type: optview.Exec
111 mutable_opts, # type: state.MutableOpts
112 procs, # type: Dict[str, value.Proc]
113 hay_state, # type: hay_ysh.HayState
114 builtins, # type: Dict[int, _Builtin]
115 search_path, # type: state.SearchPath
116 ext_prog, # type: process.ExternalProgram
117 waiter, # type: process.Waiter
118 tracer, # type: dev.Tracer
119 job_control, # type: process.JobControl
120 job_list, # type: process.JobList
121 fd_state, # type: process.FdState
122 trap_state, # type: trap_osh.TrapState
123 errfmt # type: ui.ErrorFormatter
124 ):
125 # type: (...) -> None
126 vm._Executor.__init__(self)
127 self.mem = mem
128 self.exec_opts = exec_opts
129 self.mutable_opts = mutable_opts # for IsDisabled(), not mutating
130 self.procs = procs
131 self.hay_state = hay_state
132 self.builtins = builtins
133 self.search_path = search_path
134 self.ext_prog = ext_prog
135 self.waiter = waiter
136 self.tracer = tracer
137 self.job_control = job_control
138 # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
139 self.job_list = job_list
140 self.fd_state = fd_state
141 self.trap_state = trap_state
142 self.errfmt = errfmt
143 self.process_sub_stack = [] # type: List[_ProcessSubFrame]
144 self.clean_frame_pool = [] # type: List[_ProcessSubFrame]
145
146 # When starting a pipeline in the foreground, we need to pass a handle to it
147 # through the evaluation of the last node back to ourselves for execution.
148 # We use this handle to make sure any processes forked for the last part of
149 # the pipeline are placed into the same process group as the rest of the
150 # pipeline. Since there is, by design, only ever one foreground pipeline and
151 # any pipelines started within subshells run in their parent's process
152 # group, we only need one pointer here, not some collection.
153 self.fg_pipeline = None # type: Optional[process.Pipeline]
154
    def CheckCircularDeps(self):
        # type: () -> None
        """Sanity check for late-bound dependencies.

        self.cmd_ev is not assigned in __init__ (it's wired up afterward,
        since the command evaluator also refers back to this executor), so
        verify here that the wiring actually happened.
        """
        assert self.cmd_ev is not None
158
    def _MakeProcess(self, node, inherit_errexit=True):
        # type: (command_t, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process (not yet started).

        Args:
          node: the command to run in the child process
          inherit_errexit: whether the subprogram inherits the errexit option

        Raises:
          error via e_die() for invalid control flow in a child, e.g.
          'break | less'.
        """
        UP_node = node  # mycpp idiom: keep the unspecialized ref for cast()
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get #
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        inherit_errexit=inherit_errexit)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
193
    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Args:
          builtin_id: index into self.builtins
          cmd_val: the evaluated argv and arg locations

        Returns:
          The builtin's exit status; 2 on a usage error.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        # Flush stdout after the builtin runs, even on a usage error.
        with vm.ctx_FlushStdout():
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    status = 2  # consistent error code for usage error

        return status
217
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Lookup order: assignment builtins (rejected here), special builtins,
        procs (unless NO_CALL_PROCS), hay names, normal builtins, then
        external commands resolved via $PATH.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.

        Args:
          cmd_val: evaluated argv with arg locations and typed args
          cmd_st: mutated to mark "leaf" commands for error reporting
          run_flags: bitwise OR of DO_FORK, NO_CALL_PROCS, USE_DEFAULT_PATH

        Returns:
          The command's exit status.  When DO_FORK is absent, this process is
          replaced via exec() and the call never returns.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons. Could relax it
            # later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            proc_node = self.procs.get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found' % arg0, arg0_loc)
            return 127  # conventional "command not found" status

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the foreground pipeline's process group (see
                    # RunPipeline, which sets self.fg_pipeline).
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
361
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """Run a command in the background, for '&' etc.

        Starts the job without waiting, records the last PID in
        self.mem.last_bg_pid (for $!), and adds it to the job list.

        Returns:
          0 always; the job's real status is collected later via wait/jobs.
        """
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node  # mycpp idiom: keep the unspecialized ref for cast()

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            # Every stage forks, unlike a foreground pipeline (see RunPipeline,
            # where the last stage runs in this process).
            for child in node.children:
                p = self._MakeProcess(child)
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node)
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0
406
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline like 'ls | wc -l'.

        The first n-1 stages run in forked child processes; the LAST stage
        runs in this shell process (so 'echo foo | read line' can set a
        variable).  Statuses and per-stage locations are written into
        status_out rather than returned.
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Publish the handle so processes forked while running the last
            # part can join this pipeline's process group (RunSimpleCommand
            # reads self.fg_pipeline).
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
440
441 def RunSubshell(self, node):
442 # type: (command_t) -> int
443 p = self._MakeProcess(node)
444 if self.job_control.Enabled():
445 p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))
446
447 return p.RunProcess(self.waiter, trace.ForkWait)
448
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run a command sub like $(echo hi) and return its captured stdout.

        Forks a child whose stdout goes to a pipe, reads the pipe until EOF,
        waits for the child, and handles command_sub_errexit.  Trailing
        newlines are stripped, per shell convention.
        """
        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Simple:
            simple = cast(command.Simple, node)
            # Detect '< file'
            if (len(simple.words) == 0 and len(simple.redirects) == 1 and
                    simple.redirects[0].op.id == Id.Redir_Less):
                # change it to __cat < file
                # TODO: change to 'internal cat' (issue 1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])
                # MUTATE the command.Simple node. This will only be done the first
                # time in the parent process.
                simple.words.append(cat_word)

        p = self._MakeProcess(node,
                              inherit_errexit=self.exec_opts.inherit_errexit())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Read until EOF, retrying on EINTR; pyos.Read appends into chunks.
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')
532
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word. The
        NormalWordEvaluator calls this method, RunProcessSub(), which does 3
        things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell nor 'diff' will write to it.
             However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes. But it actually
        calls open() on both files passed as argv. (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes. Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child)

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh. Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        # Current frame pushed by PushProcessSub(); popped (and waited on)
        # by PopProcessSub().
        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does. The calling
        # program needs to read() before we can wait, e.g.
        # diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
643
644 def PushRedirects(self, redirects, err_out):
645 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
646 if len(redirects) == 0: # Optimized to avoid allocs
647 return
648 self.fd_state.Push(redirects, err_out)
649
650 def PopRedirects(self, num_redirects, err_out):
651 # type: (int, List[error.IOError_OSError]) -> None
652 if num_redirects == 0: # Optimized to avoid allocs
653 return
654 self.fd_state.Pop(err_out)
655
656 def PushProcessSub(self):
657 # type: () -> None
658 if len(self.clean_frame_pool):
659 # Optimized to avoid allocs
660 new_frame = self.clean_frame_pool.pop()
661 else:
662 new_frame = _ProcessSubFrame()
663 self.process_sub_stack.append(new_frame)
664
665 def PopProcessSub(self, compound_st):
666 # type: (StatusArray) -> None
667 """This method is called by a context manager, which means we always
668 wait() on the way out, which I think is the right thing.
669
670 We don't always set _process_sub_status, e.g. if some fatal
671 error occurs first, but we always wait.
672 """
673 frame = self.process_sub_stack.pop()
674 if frame.WasModified():
675 frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
676 else:
677 # Optimized to avoid allocs
678 self.clean_frame_pool.append(frame)
679
680 # Note: the 3 lists in _ProcessSubFrame are hot in our profiles. It would
681 # be nice to somehow "destroy" them here, rather than letting them become
682 # garbage that needs to be traced.
683
684 # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
685 # Pop(), and Top() of VALUES rather than GC objects?