# OILS / core / executor.py — View on Github | oilshell.org
# 707 lines, 383 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from _devbuild.gen.value_asdl import value
18from builtin import hay_ysh
19from core import dev
20from core import error
21from core import process
22from core.error import e_die, e_die_status
23from core import pyos
24from core import pyutil
25from core import state
26from core import ui
27from core import vm
28from frontend import consts
29from frontend import lexer
30from mycpp.mylib import log
31
32import posix_ as posix
33
34from typing import cast, Dict, List, Optional, TYPE_CHECKING
35if TYPE_CHECKING:
36 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
37 StatusArray)
38 from _devbuild.gen.syntax_asdl import command_t
39 from builtin import trap_osh
40 from core import optview
41 from core import state
42 from core.vm import _Builtin
43
44_ = log
45
46
class _ProcessSubFrame(object):
    """Bookkeeping for the process subs of one command.

    Example: diff <(cat 1) <(cat 2) > >(tac) creates three entries here.
    """

    def __init__(self):
        # type: () -> None

        # These objects appear unconditionally in the main loop, and aren't
        # commonly used, so empty frames are pooled and reused by the
        # executor rather than reallocated.
        self._to_wait = []  # type: List[process.Process]
        self._to_close = []  # type: List[int]  # file descriptors
        self._locs = []  # type: List[loc_t]
        self._modified = False

    def WasModified(self):
        # type: () -> bool
        """Return True if any process sub was registered in this frame."""
        return self._modified

    def Append(self, p, fd, status_loc):
        # type: (process.Process, int, loc_t) -> None
        """Register one started process sub: its process, FD, and location."""
        self._to_wait.append(p)
        self._to_close.append(fd)
        self._locs.append(status_loc)
        self._modified = True

    def MaybeWaitOnProcessSubs(self, waiter, status_array):
        # type: (process.Waiter, StatusArray) -> None
        """Close our ends of the pipes, then wait() on every process sub."""

        # First close all the FDs we kept open, in evaluation order.
        for fd in self._to_close:
            posix.close(fd)

        # Then wait in the same order that they were evaluated.
        st_codes = []  # type: List[int]
        st_locs = []  # type: List[loc_t]
        i = 0
        for p in self._to_wait:
            #log('waiting for %s', p)
            st_codes.append(p.Wait(waiter))
            st_locs.append(self._locs[i])
            i += 1

        status_array.codes = st_codes
        status_array.locs = st_locs
90
91
# Bit flags for RunSimpleCommand
DO_FORK = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses proc/function lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' looks up ls in DEFAULT_PATH

# Fallback path for 'command -p'.  Copied from var.c in dash.
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
102
103
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter.

    Dispatches simple commands to assignment/special/normal builtins, procs,
    hay nodes, and external programs, and implements the process-based
    constructs: pipelines, background jobs, subshells, command subs, and
    process subs.
    """

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: Dict[str, value.Proc]
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of process-sub frames, pushed/popped around each command that
        # may contain <(...) or >(...).
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Unmodified frames are recycled here to avoid allocations.
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle
        # to it through the evaluation of the last node back to ourselves for
        # execution.  We use this handle to make sure any processes forked for
        # the last part of the pipeline are placed into the same process group
        # as the rest of the pipeline.  Since there is, by design, only ever
        # one foreground pipeline and any pipelines started within subshells
        # run in their parent's process group, we only need one pointer here,
        # not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]

    def CheckCircularDeps(self):
        # type: () -> None
        # cmd_ev is set after construction (circular dependency); verify it.
        assert self.cmd_ev is not None

    def _MakeProcess(self, node, inherit_errexit=True):
        # type: (command_t, bool) -> process.Process
        """Assume we will run the node in another process.

        Return a process.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors?  This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose.  Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit=inherit_errexit)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p

    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Returns the builtin's exit status; 1 for I/O errors, 2 for usage
        errors.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        io_errors = []  # type: List[error.IOError_OSError]
        # Flush stdout after the builtin runs, recording flush errors.
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status

    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Lookup precedence: assignment builtins (rejected here), special
        builtins, procs (unless NO_CALL_PROCS), hay names, normal builtins,
        then external commands.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # 'command readonly' is disallowed, for technical reasons.  Could
            # relax it later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look up shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name?  I guess
            # that's why you have = and 'type' inspect them.

            proc_node = self.procs.get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    # NOTE: Functions could call 'exit 42' directly, etc.
                    status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins?  Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the process group of the foreground pipeline we're
                    # the last part of (see RunPipeline).
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')

    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.  Starts the job without waiting; always returns 0."""
        # Special case for pipeline.  There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be
        # children of the shell process, or you can make one process in group
        # be the ancestor of all the other processes in that group.  The sample
        # shell program presented in this chapter uses the first approach
        # because it makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            for child in node.children:
                p = self._MakeProcess(child)
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications,
            # we have to register SIGCHLD.  But then that introduces race
            # conditions.  If we haven't called Register yet, then we won't
            # know who to notify.

            p = self._MakeProcess(node)
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0

    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline, with the last part in THIS process.

        Fills status_out.pipe_status and status_out.pipe_locs rather than
        returning a status.
        """

        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child)
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS.  'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Hand the pipeline to RunSimpleCommand via self.fg_pipeline, so a
            # process forked for the last part joins the same process group.
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs

    def RunSubshell(self, node):
        # type: (command_t) -> int
        """For ( ... ).  Fork, run the node in the child, and wait for it."""
        p = self._MakeProcess(node)
        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        return p.RunProcess(self.waiter, trace.ForkWait)

    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """For $(...).  Fork, capture the child's stdout, and wait.

        Returns the captured output with trailing newlines stripped.
        """

        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places.  Only one of them turns
            # off _allow_process_sub, so use that to pick the right message.
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Simple:
            simple = cast(command.Simple, node)
            # Detect '< file'
            if (len(simple.words) == 0 and len(simple.redirects) == 1 and
                    simple.redirects[0].op.id == Id.Redir_Less):
                # change it to __cat < file
                # TODO: change to 'internal cat' (issue 1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])
                # MUTATE the command.Simple node.  This will only be done the
                # first time in the parent process.
                simple.words.append(cat_word)

        p = self._MakeProcess(node,
                              inherit_errexit=self.exec_opts.inherit_errexit())
        # Shell quirk: Command subs remain part of the shell's process group,
        # so we don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Read until EOF, retrying on EINTR; any other read error is fatal.
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD.  We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash.  Example:
            #
            #   a=$(false)
            #   echo foo  # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')

    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word.  The
        NormalWordEvaluator calls this method, RunProcessSub(), which does
        3 things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell or 'diff' will write to
             it.  However we must retain 'r', because 'diff' hasn't opened
             /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes.  But it actually
        calls open() on both files passed as argv.  (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes.  Neither us or
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child)

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r,
                                         w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash
            # and zsh.  Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does.  The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()

    def PushRedirects(self, redirects, err_out):
        # type: (List[RedirValue], List[error.IOError_OSError]) -> None
        """Apply redirects for a command; errors are appended to err_out."""
        if len(redirects) == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Push(redirects, err_out)

    def PopRedirects(self, num_redirects, err_out):
        # type: (int, List[error.IOError_OSError]) -> None
        """Undo the redirects applied by the matching PushRedirects call."""
        if num_redirects == 0:  # Optimized to avoid allocs
            return
        self.fd_state.Pop(err_out)

    def PushProcessSub(self):
        # type: () -> None
        """Push a frame to collect process subs started by the next command."""
        if len(self.clean_frame_pool):
            # Optimized to avoid allocs
            new_frame = self.clean_frame_pool.pop()
        else:
            new_frame = _ProcessSubFrame()
        self.process_sub_stack.append(new_frame)

    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles.  It
        # would be nice to somehow "destroy" them here, rather than letting
        # them become garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports
        # Push(), Pop(), and Top() of VALUES rather than GC objects?
705
706
707# vim: sw=4