OILS / core / executor.py View on Github | oilshell.org

692 lines, 369 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import state
25from core import ui
26from core import vm
27from frontend import consts
28from frontend import lexer
29from mycpp.mylib import log
30
31import posix_ as posix
32
33from typing import cast, Dict, List, Optional, TYPE_CHECKING
34if TYPE_CHECKING:
35 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
36 StatusArray)
37 from _devbuild.gen.syntax_asdl import command_t
38 from builtin import trap_osh
39 from core import optview
40 from core import state
41 from core.vm import _Builtin
42
43_ = log
44
45
class _ProcessSubFrame(object):
    """Tracks the process subs of one command, e.g. diff <(cat 1) <(cat 2) > >(tac).

    Each frame records the started processes, the file descriptors the shell
    must close afterward, and a source location per process sub (for errors).
    """

    def __init__(self):
        # type: () -> None

        # These objects appear unconditionally in the main loop, and aren't
        # commonly used, so we manually optimize [] into None.

        self._to_wait = []  # type: List[process.Process]
        self._to_close = []  # type: List[int]  # file descriptors
        self._locs = []  # type: List[loc_t]
        self._modified = False

    def WasModified(self):
        # type: () -> bool
        """Return True if any process sub was registered on this frame."""
        return self._modified

    def Append(self, p, fd, status_loc):
        # type: (process.Process, int, loc_t) -> None
        """Register a started process sub: its process, fd to close, and location."""
        self._modified = True
        self._to_wait.append(p)
        self._to_close.append(fd)
        self._locs.append(status_loc)

    def MaybeWaitOnProcessSubs(self, waiter, status_array):
        # type: (process.Waiter, StatusArray) -> None
        """Close our ends of the pipes, then wait for every process sub.

        Statuses and locations are stored into status_array in evaluation order.
        """
        # Close first so the child processes see EOF and can exit.
        for fd_to_close in self._to_close:
            posix.close(fd_to_close)

        exit_codes = []  # type: List[int]
        status_locs = []  # type: List[loc_t]
        # Wait in the same order that they were evaluated.  That seems fine.
        for proc, proc_loc in zip(self._to_wait, self._locs):
            #log('waiting for %s', proc)
            exit_codes.append(proc.Wait(waiter))
            status_locs.append(proc_loc)

        status_array.codes = exit_codes
        status_array.locs = status_locs
89
90
# Big flags for RunSimpleCommand
DO_FORK = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses function lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' changes the path

# Copied from var.c in dash
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
101
102
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter."""

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: Dict[str, value.Proc]
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # 'sleep 5 &' puts a (PID, job#) entry here.  And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of frames, one per nesting level of process subs
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Unmodified frames are recycled here to avoid allocations
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]

    def CheckCircularDeps(self):
        # type: () -> None
        """Assert that the circular dependency on cmd_ev has been wired up."""
        assert self.cmd_ev is not None

    def _MakeProcess(self, node, inherit_errexit=True):
        # type: (command_t, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors?  This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose.  Other notes:
        #
        # - We might want errors to fit on a single line so they don't get #
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit=inherit_errexit)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p

    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        with vm.ctx_FlushStdout():
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    status = 2  # consistent error code for usage error

        return status

    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Possible variations:
        - YSH might have different, simpler rules.  No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons.  Could relax it
            # later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #  e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look up shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name?  I guess
            # that's why you have = and 'type' to inspect them

            proc_node = self.procs.get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins?  Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')

    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc."""
        # Special case for pipeline.  There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            for child in node.children:
                p = self._MakeProcess(child)
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD.  But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node)
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0

    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline, with the last part in THIS process.

        Fills in status_out.pipe_status and status_out.pipe_locs.
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs

    def RunSubshell(self, node):
        # type: (command_t) -> int
        """Run a subshell ( ... ) in a child process and wait for it."""
        p = self._MakeProcess(node)
        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        return p.RunProcess(self.waiter, trace.ForkWait)

    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(...) in a child process and return its captured stdout."""
        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places.  Only one of them turns
            # off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Simple:
            simple = cast(command.Simple, node)
            # Detect '< file'
            if (len(simple.words) == 0 and len(simple.redirects) == 1 and
                    simple.redirects[0].op.id == Id.Redir_Less):
                # change it to __cat < file
                # TODO: change to 'internal cat' (issue 1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])
                # MUTATE the command.Simple node.  This will only be done the first
                # time in the parent process.
                simple.words.append(cat_word)

        p = self._MakeProcess(node,
                              inherit_errexit=self.exec_opts.inherit_errexit())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD.  We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash.  Example:
            #
            # a=$(false)
            # echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')

    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word.  The
        NormalWordEvaluator calls this method, RunProcessSub(), which does
        these things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell nor 'diff' will write to it.
             However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes.  But it actually
        calls open() on both files passed as argv.  (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes.  Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child)

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh.  Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does.  The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()

    def PushRedirects(self, redirects, err_out):
        # type: (List[RedirValue], List[error.IOError_OSError]) -> None
        """Apply a list of redirects; errors are appended to err_out."""
        if len(redirects) == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Push(redirects, err_out)

    def PopRedirects(self, num_redirects, err_out):
        # type: (int, List[error.IOError_OSError]) -> None
        """Undo the redirects applied by the matching PushRedirects()."""
        if num_redirects == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Pop(err_out)

    def PushProcessSub(self):
        # type: () -> None
        """Begin a new process-sub frame, reusing a clean one if available."""
        if len(self.clean_frame_pool):
            # Optimized to avoid allocs
            new_frame = self.clean_frame_pool.pop()
        else:
            new_frame = _ProcessSubFrame()
        self.process_sub_stack.append(new_frame)

    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles.  It would
        # be nice to somehow "destroy" them here, rather than letting them become
        # garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
        # Pop(), and Top() of VALUES rather than GC objects?
690
691
692# vim: sw=4