# OILS / core / executor.py
# View on Github | oilshell.org
# 687 lines, 369 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import state
25from core import ui
26from core import vm
27from frontend import consts
28from frontend import lexer
29from mycpp.mylib import log
30
31import posix_ as posix
32
33from typing import cast, Dict, List, Optional, TYPE_CHECKING
34if TYPE_CHECKING:
35 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
36 StatusArray)
37 from _devbuild.gen.syntax_asdl import command_t
38 from builtin import trap_osh
39 from core import optview
40 from core import state
41 from core.vm import _Builtin
42
43_ = log
44
45
class _ProcessSubFrame(object):
    """To keep track of diff <(cat 1) <(cat 2) > >(tac)

    One frame is pushed per redirect/word-evaluation scope; it records every
    process sub started in that scope so we can close its FD and wait() for
    it on the way out (see PushProcessSub / PopProcessSub).
    """

    def __init__(self):
        # type: () -> None

        # These objects appear unconditionally in the main loop, and aren't
        # commonly used, so we manually optimize [] into None.
        # NOTE(review): the lists ARE created unconditionally here; the alloc
        # optimization appears to live in ShellExecutor.clean_frame_pool —
        # confirm whether this comment is stale.

        self._to_wait = []  # type: List[process.Process]
        self._to_close = []  # type: List[int]  # file descriptors
        self._locs = []  # type: List[loc_t]
        self._modified = False

    def WasModified(self):
        # type: () -> bool
        # True if at least one process sub was registered; clean frames are
        # recycled instead of waited on.
        return self._modified

    def Append(self, p, fd, status_loc):
        # type: (process.Process, int, loc_t) -> None
        # Register a started process sub: p will be waited on, fd closed, and
        # its exit status reported at status_loc.
        self._modified = True

        self._to_wait.append(p)
        self._to_close.append(fd)
        self._locs.append(status_loc)

    def MaybeWaitOnProcessSubs(self, waiter, status_array):
        # type: (process.Waiter, StatusArray) -> None
        # Close our ends of the pipes first, then wait for every process sub,
        # filling status_array with (status, location) pairs.

        # Wait in the same order that they were evaluated. That seems fine.
        for fd in self._to_close:
            posix.close(fd)

        codes = []  # type: List[int]
        locs = []  # type: List[loc_t]
        for i, p in enumerate(self._to_wait):
            #log('waiting for %s', p)
            st = p.Wait(waiter)
            codes.append(st)
            locs.append(self._locs[i])

        status_array.codes = codes
        status_array.locs = locs
89
90
# Bit flags for RunSimpleCommand
DO_FORK = 1 << 1  # fork before exec'ing an external command (the normal case)
NO_CALL_PROCS = 1 << 2  # command ls suppresses function lookup
USE_DEFAULT_PATH = 1 << 3  # for command -p ls changes the path

# Copied from var.c in dash
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
101
102
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter."""

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: Dict[str, value.Proc]
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of process-sub scopes; one frame per redirect scope (see
        # PushProcessSub / PopProcessSub).
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Recycled, never-modified frames, to avoid allocations in the main loop.
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]
155
    def CheckCircularDeps(self):
        # type: () -> None
        # cmd_ev is wired in after construction (presumably to break a
        # circular dependency with the command evaluator); this asserts the
        # wiring actually happened before the executor is used.
        assert self.cmd_ev is not None
159
    def _MakeProcess(self, node, inherit_errexit=True):
        # type: (command_t, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process (not yet started).

        Args:
          node: the command to run in the child
          inherit_errexit: whether the subprogram inherits errexit; command
            subs pass self.exec_opts.inherit_errexit() here.

        Raises:
          error.FatalRuntime (via e_die) for invalid control flow like
          'break | less'.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get #
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit=inherit_errexit)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
195
    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Returns the builtin's exit status; a usage error maps to status 2.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        # Flush stdout after the builtin runs, so its output interleaves
        # correctly with external commands.
        with vm.ctx_FlushStdout():
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    status = 2  # consistent error code for usage error

        return status
219
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Resolution order visible below: assignment builtins (rejected),
        special builtins, procs (unless NO_CALL_PROCS), hay nodes, normal
        builtins, then external commands via fork/exec.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.

        Args:
          call_procs: whether to look up procs.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons. Could relax it
            # later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #  e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            proc_node = self.procs.get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found' % arg0, arg0_loc)
            return 127

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the foreground pipeline's process group rather than
                    # starting our own (this is the last part of a pipeline).
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
363
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.

        Starts the job (pipeline or single process) without waiting, records
        $! in self.mem.last_bg_pid, and adds it to the 'jobs' list.
        Always returns status 0.
        """
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            for child in node.children:
                p = self._MakeProcess(child)
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node)
            if self.job_control.Enabled():
                # Background jobs lead their own process group.
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0
408
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline; results go into status_out.

        The first n-1 parts run in forked children; the LAST part runs in this
        shell process (so 'echo foo | read line' can set a variable).
        self.fg_pipeline is set around RunLastPart so that a fork for the last
        part joins the pipeline's process group (see RunSimpleCommand).
        """

        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
442
    def RunSubshell(self, node):
        # type: (command_t) -> int
        """Run ( node ) in a child process and wait for it.

        Returns the child's exit status.
        """
        p = self._MakeProcess(node)
        if self.job_control.Enabled():
            # Subshells lead their own process group.
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        return p.RunProcess(self.waiter, trace.ForkWait)
450
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(...) in a child process and return its stdout.

        The child's stdout is redirected to a pipe; we read it to EOF, wait,
        and return the output with trailing newlines stripped.

        Raises:
          error.ErrExit when command_sub_errexit is on and the child fails.
        """

        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns
            # off _allow_process_sub, which is how we distinguish the messages.
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Simple:
            simple = cast(command.Simple, node)
            # Detect '< file'
            if (len(simple.words) == 0 and len(simple.redirects) == 1 and
                    simple.redirects[0].op.id == Id.Redir_Less):
                # change it to __cat < file
                # TODO: change to 'internal cat' (issue 1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])
                # MUTATE the command.Simple node. This will only be done the first
                # time in the parent process.
                simple.words.append(cat_word)

        p = self._MakeProcess(node,
                              inherit_errexit=self.exec_opts.inherit_errexit())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        while True:
            # pyos.Read appends to chunks on success and returns (n, errno).
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry after a signal interrupted the read
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')
534
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word.  The
        NormalWordEvaluator calls this method, RunProcessSub(), which does 3
        things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell or 'diff' will write to it.
             However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes.  But it actually
        calls open() on both files passed as argv.  (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes.  Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child)

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh.  Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            # Process subs lead their own process group.
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does.  The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
645
646 def PushRedirects(self, redirects, err_out):
647 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
648 if len(redirects) == 0: # Optimized to avoid allocs
649 return
650 self.fd_state.Push(redirects, err_out)
651
652 def PopRedirects(self, num_redirects, err_out):
653 # type: (int, List[error.IOError_OSError]) -> None
654 if num_redirects == 0: # Optimized to avoid allocs
655 return
656 self.fd_state.Pop(err_out)
657
658 def PushProcessSub(self):
659 # type: () -> None
660 if len(self.clean_frame_pool):
661 # Optimized to avoid allocs
662 new_frame = self.clean_frame_pool.pop()
663 else:
664 new_frame = _ProcessSubFrame()
665 self.process_sub_stack.append(new_frame)
666
    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            # Close FDs and wait; statuses go into compound_st.
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs: an untouched frame can be reused by
            # the next PushProcessSub().
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles.  It would
        # be nice to somehow "destroy" them here, rather than letting them become
        # garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
        # Pop(), and Top() of VALUES rather than GC objects?