OILS / core / executor.py View on Github | oilshell.org

714 lines, 387 significant
1"""executor.py."""
2from __future__ import print_function
3
4from errno import EINTR
5
6from _devbuild.gen.id_kind_asdl import Id
7from _devbuild.gen.option_asdl import builtin_i
8from _devbuild.gen.runtime_asdl import RedirValue, trace
9from _devbuild.gen.syntax_asdl import (
10 command,
11 command_e,
12 CommandSub,
13 CompoundWord,
14 loc,
15 loc_t,
16)
17from builtin import hay_ysh
18from core import dev
19from core import error
20from core import process
21from core.error import e_die, e_die_status
22from core import pyos
23from core import pyutil
24from core import state
25from display import ui
26from core import vm
27from frontend import consts
28from frontend import lexer
29from mycpp.mylib import log
30
31import posix_ as posix
32
33from typing import cast, Dict, List, Optional, TYPE_CHECKING
34if TYPE_CHECKING:
35 from _devbuild.gen.runtime_asdl import (cmd_value, CommandStatus,
36 StatusArray)
37 from _devbuild.gen.syntax_asdl import command_t
38 from builtin import trap_osh
39 from core import optview
40 from core import state
41 from core.vm import _Builtin
42
# Reference 'log' so the import stays used even when all log() calls below
# are commented out.
_ = log
44
45
class _ProcessSubFrame(object):
    """Bookkeeping for process subs, e.g. diff <(cat 1) <(cat 2) > >(tac)"""

    def __init__(self):
        # type: () -> None

        # These objects appear unconditionally in the main loop, and aren't
        # commonly used, so we manually optimize [] into None.

        self._to_wait = []  # type: List[process.Process]
        self._to_close = []  # type: List[int]  # file descriptors
        self._locs = []  # type: List[loc_t]
        self._modified = False

    def WasModified(self):
        # type: () -> bool
        """True if at least one process sub was registered in this frame."""
        return self._modified

    def Append(self, p, fd, status_loc):
        # type: (process.Process, int, loc_t) -> None
        """Register a started process sub, the fd to close later, and a blame
        location for its exit status."""
        self._to_wait.append(p)
        self._to_close.append(fd)
        self._locs.append(status_loc)

        self._modified = True

    def MaybeWaitOnProcessSubs(self, waiter, status_array):
        # type: (process.Waiter, StatusArray) -> None
        """Close the shell's pipe ends, then wait for every process sub.

        Exit statuses and blame locations are stored in status_array.
        """
        for fd in self._to_close:
            posix.close(fd)

        # Wait in the same order that they were evaluated. That seems fine.
        statuses = []  # type: List[int]
        blame_locs = []  # type: List[loc_t]
        for i, proc in enumerate(self._to_wait):
            #log('waiting for %s', proc)
            statuses.append(proc.Wait(waiter))
            blame_locs.append(self._locs[i])

        status_array.codes = statuses
        status_array.locs = blame_locs
89
90
# Bit flags for RunSimpleCommand()
DO_FORK = 1 << 1
NO_CALL_PROCS = 1 << 2  # 'command ls' suppresses proc/function lookup
USE_DEFAULT_PATH = 1 << 3  # 'command -p ls' looks up ls in DEFAULT_PATH

# Copied from var.c in dash
DEFAULT_PATH = [
    '/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/sbin',
    '/bin'
]
101
102
class ShellExecutor(vm._Executor):
    """An executor combined with the OSH language evaluators in osh/ to create
    a shell interpreter."""

    def __init__(
            self,
            mem,  # type: state.Mem
            exec_opts,  # type: optview.Exec
            mutable_opts,  # type: state.MutableOpts
            procs,  # type: state.Procs
            hay_state,  # type: hay_ysh.HayState
            builtins,  # type: Dict[int, _Builtin]
            search_path,  # type: state.SearchPath
            ext_prog,  # type: process.ExternalProgram
            waiter,  # type: process.Waiter
            tracer,  # type: dev.Tracer
            job_control,  # type: process.JobControl
            job_list,  # type: process.JobList
            fd_state,  # type: process.FdState
            trap_state,  # type: trap_osh.TrapState
            errfmt  # type: ui.ErrorFormatter
    ):
        # type: (...) -> None
        vm._Executor.__init__(self)
        self.mem = mem
        self.exec_opts = exec_opts
        self.mutable_opts = mutable_opts  # for IsDisabled(), not mutating
        self.procs = procs
        self.hay_state = hay_state
        self.builtins = builtins
        self.search_path = search_path
        self.ext_prog = ext_prog
        self.waiter = waiter
        self.tracer = tracer
        self.multi_trace = tracer.multi_trace
        self.job_control = job_control
        # sleep 5 & puts a (PID, job#) entry here. And then "jobs" displays it.
        self.job_list = job_list
        self.fd_state = fd_state
        self.trap_state = trap_state
        self.errfmt = errfmt
        # Stack of per-command frames for process subs; see PushProcessSub().
        self.process_sub_stack = []  # type: List[_ProcessSubFrame]
        # Pool of unmodified frames, reused to avoid allocations.
        self.clean_frame_pool = []  # type: List[_ProcessSubFrame]

        # When starting a pipeline in the foreground, we need to pass a handle to it
        # through the evaluation of the last node back to ourselves for execution.
        # We use this handle to make sure any processes forked for the last part of
        # the pipeline are placed into the same process group as the rest of the
        # pipeline. Since there is, by design, only ever one foreground pipeline and
        # any pipelines started within subshells run in their parent's process
        # group, we only need one pointer here, not some collection.
        self.fg_pipeline = None  # type: Optional[process.Pipeline]
155
    def CheckCircularDeps(self):
        # type: () -> None
        # cmd_ev is not set in __init__ — presumably wired up after
        # construction to break a circular dependency; verify that happened.
        assert self.cmd_ev is not None
159
    def _MakeProcess(self, node, inherit_errexit, inherit_errtrace):
        # type: (command_t, bool, bool) -> process.Process
        """Assume we will run the node in another process.

        Returns a process.Process wrapping a SubProgramThunk for 'node'.
        Dies with a fatal error on control flow other than 'exit', which is
        invalid in a pipeline / subshell / background job.
        """
        UP_node = node
        if node.tag() == command_e.ControlFlow:
            node = cast(command.ControlFlow, UP_node)
            # Pipeline or subshells with control flow are invalid, e.g.:
            # - break | less
            # - continue | less
            # - ( return )
            # NOTE: This could be done at parse time too.
            if node.keyword.id != Id.ControlFlow_Exit:
                e_die(
                    'Invalid control flow %r in pipeline / subshell / background'
                    % lexer.TokenVal(node.keyword), node.keyword)

        # NOTE: If ErrExit(), we could be verbose about subprogram errors? This
        # only really matters when executing 'exit 42', because the child shell
        # inherits errexit and will be verbose. Other notes:
        #
        # - We might want errors to fit on a single line so they don't get
        #   interleaved.
        # - We could turn the `exit` builtin into a error.FatalRuntime exception
        #   and get this check for "free".
        thunk = process.SubProgramThunk(self.cmd_ev,
                                        node,
                                        self.trap_state,
                                        self.multi_trace,
                                        inherit_errexit,
                                        inherit_errtrace)
        p = process.Process(thunk, self.job_control, self.job_list,
                            self.tracer)
        return p
196
    def RunBuiltin(self, builtin_id, cmd_val):
        # type: (int, cmd_value.Argv) -> int
        """Run a builtin.

        Also called by the 'builtin' builtin.

        Returns the builtin's exit status; 1 on builtin I/O error
        (including errors flushing stdout), 2 on usage error.
        """
        self.tracer.OnBuiltin(builtin_id, cmd_val.argv)

        builtin_func = self.builtins[builtin_id]

        io_errors = []  # type: List[error.IOError_OSError]
        with vm.ctx_FlushStdout(io_errors):
            # note: could be second word, like 'builtin read'
            with ui.ctx_Location(self.errfmt, cmd_val.arg_locs[0]):
                try:
                    status = builtin_func.Run(cmd_val)
                    assert isinstance(status, int)
                except (IOError, OSError) as e:
                    self.errfmt.PrintMessage(
                        '%s builtin I/O error: %s' %
                        (cmd_val.argv[0], pyutil.strerror(e)),
                        cmd_val.arg_locs[0])
                    return 1
                except error.Usage as e:
                    arg0 = cmd_val.argv[0]
                    # e.g. 'type' doesn't accept flag '-x'
                    self.errfmt.PrefixPrint(e.msg, '%r ' % arg0, e.location)
                    return 2  # consistent error code for usage error

        if len(io_errors):  # e.g. disk full, ulimit
            self.errfmt.PrintMessage(
                '%s builtin I/O error: %s' %
                (cmd_val.argv[0], pyutil.strerror(io_errors[0])),
                cmd_val.arg_locs[0])
            return 1

        return status
234
    def RunSimpleCommand(self, cmd_val, cmd_st, run_flags):
        # type: (cmd_value.Argv, CommandStatus, int) -> int
        """Run builtins, functions, external commands.

        Resolution order, as implemented below: assignment builtins (rejected
        here), special builtins, procs (unless NO_CALL_PROCS), hay nodes,
        normal builtins, then external commands.

        Possible variations:
        - YSH might have different, simpler rules. No special builtins, etc.
        - YSH might have OILS_PATH = :| /bin /usr/bin | or something.
        - Interpreters might want to define all their own builtins.
        """
        argv = cmd_val.argv
        if len(cmd_val.arg_locs):
            arg0_loc = cmd_val.arg_locs[0]  # type: loc_t
        else:
            arg0_loc = loc.Missing

        # This happens when you write "$@" but have no arguments.
        if len(argv) == 0:
            if self.exec_opts.strict_argv():
                e_die("Command evaluated to an empty argv array", arg0_loc)
            else:
                return 0  # status 0, or skip it?

        arg0 = argv[0]

        builtin_id = consts.LookupAssignBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            # command readonly is disallowed, for technical reasons. Could relax it
            # later.
            self.errfmt.Print_("Can't run assignment builtin recursively",
                               arg0_loc)
            return 1

        builtin_id = consts.LookupSpecialBuiltin(arg0)
        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            status = self.RunBuiltin(builtin_id, cmd_val)
            # TODO: Enable this and fix spec test failures.
            # Also update _SPECIAL_BUILTINS in osh/builtin.py.
            #if status != 0:
            #    e_die_status(status, 'special builtin failed')
            return status

        call_procs = not (run_flags & NO_CALL_PROCS)
        # Builtins like 'true' can be redefined as functions.
        if call_procs:
            # TODO: Look shell functions in self.sh_funcs, but procs are
            # value.Proc in the var namespace.
            # Pitfall: What happens if there are two of the same name? I guess
            # that's why you have = and 'type' inspect them

            proc_node = self.procs.Get(arg0)
            if proc_node is not None:
                if self.exec_opts.strict_errexit():
                    disabled_tok = self.mutable_opts.ErrExitDisabledToken()
                    if disabled_tok:
                        self.errfmt.Print_(
                            'errexit was disabled for this construct',
                            disabled_tok)
                        self.errfmt.StderrLine('')
                        e_die(
                            "Can't run a proc while errexit is disabled. "
                            "Use 'try' or wrap it in a process with $0 myproc",
                            arg0_loc)

                with dev.ctx_Tracer(self.tracer, 'proc', argv):
                    with state.ctx_HideErrTrap(self.trap_state,
                                               self.exec_opts.errtrace()):
                        # NOTE: Functions could call 'exit 42' directly, etc.
                        status = self.cmd_ev.RunProc(proc_node, cmd_val)
                return status

        # Notes:
        # - procs shadow hay names
        # - hay names shadow normal builtins? Should we limit to CAPS or no?
        if self.hay_state.Resolve(arg0):
            return self.RunBuiltin(builtin_i.haynode, cmd_val)

        builtin_id = consts.LookupNormalBuiltin(arg0)

        if self.exec_opts._running_hay():
            # Hay: limit the builtins that can be run
            # - declare 'use dialect'
            # - echo and write for debugging
            # - no JSON?
            if builtin_id in (builtin_i.haynode, builtin_i.use, builtin_i.echo,
                              builtin_i.write):
                cmd_st.show_code = True  # this is a "leaf" for errors
                return self.RunBuiltin(builtin_id, cmd_val)

            self.errfmt.Print_('Unknown command %r while running hay' % arg0,
                               arg0_loc)
            return 127

        if builtin_id != consts.NO_INDEX:
            cmd_st.show_code = True  # this is a "leaf" for errors
            return self.RunBuiltin(builtin_id, cmd_val)

        environ = self.mem.GetExported()  # Include temporary variables

        if cmd_val.typed_args:
            e_die(
                '%r appears to be external. External commands don\'t accept typed args (OILS-ERR-200)'
                % arg0, cmd_val.typed_args.left)

        # Resolve argv[0] BEFORE forking.
        if run_flags & USE_DEFAULT_PATH:
            argv0_path = state.LookupExecutable(arg0, DEFAULT_PATH)
        else:
            argv0_path = self.search_path.CachedLookup(arg0)
        if argv0_path is None:
            self.errfmt.Print_('%r not found (OILS-ERR-100)' % arg0, arg0_loc)
            return 127

        # Normal case: ls /
        if run_flags & DO_FORK:
            thunk = process.ExternalThunk(self.ext_prog, argv0_path, cmd_val,
                                          environ)
            p = process.Process(thunk, self.job_control, self.job_list,
                                self.tracer)

            if self.job_control.Enabled():
                if self.fg_pipeline is not None:
                    # Join the process group of the foreground pipeline we're
                    # the last part of (see RunPipeline).
                    pgid = self.fg_pipeline.ProcessGroupId()
                    # If job control is enabled, this should be true
                    assert pgid != process.INVALID_PGID

                    change = process.SetPgid(pgid, self.tracer)
                    self.fg_pipeline = None  # clear to avoid confusion in subshells
                else:
                    change = process.SetPgid(process.OWN_LEADER, self.tracer)
                p.AddStateChange(change)

            status = p.RunProcess(self.waiter, trace.External(cmd_val.argv))

            # this is close to a "leaf" for errors
            # problem: permission denied EACCESS prints duplicate messages
            # TODO: add message command 'ls' failed
            cmd_st.show_code = True

            return status

        self.tracer.OnExec(cmd_val.argv)

        # Already forked for pipeline: ls / | wc -l
        self.ext_prog.Exec(argv0_path, cmd_val, environ)  # NEVER RETURNS

        raise AssertionError('for -Wreturn-type in C++')
381
    def RunBackgroundJob(self, node):
        # type: (command_t) -> int
        """For & etc.  Starts the job, records $!, and returns 0 immediately."""
        # Special case for pipeline. There is some evidence here:
        # https://www.gnu.org/software/libc/manual/html_node/Launching-Jobs.html#Launching-Jobs
        #
        # "You can either make all the processes in the process group be children
        # of the shell process, or you can make one process in group be the
        # ancestor of all the other processes in that group. The sample shell
        # program presented in this chapter uses the first approach because it
        # makes bookkeeping somewhat simpler."
        UP_node = node

        if UP_node.tag() == command_e.Pipeline:
            node = cast(command.Pipeline, UP_node)
            pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                                  self.job_control, self.job_list, self.tracer)
            # Unlike RunPipeline(), every part runs in a child process here.
            for child in node.children:
                p = self._MakeProcess(child, True, self.exec_opts.errtrace())
                p.Init_ParentPipeline(pi)
                pi.Add(p)

            pi.StartPipeline(self.waiter)
            pi.SetBackground()
            last_pid = pi.LastPid()
            self.mem.last_bg_pid = last_pid  # for $!

            self.job_list.AddJob(pi)  # show in 'jobs' list

        else:
            # Problem: to get the 'set -b' behavior of immediate notifications, we
            # have to register SIGCHLD. But then that introduces race conditions.
            # If we haven't called Register yet, then we won't know who to notify.

            p = self._MakeProcess(node, True, self.exec_opts.errtrace())
            if self.job_control.Enabled():
                p.AddStateChange(
                    process.SetPgid(process.OWN_LEADER, self.tracer))

            p.SetBackground()
            pid = p.StartProcess(trace.Fork)
            self.mem.last_bg_pid = pid  # for $!
            self.job_list.AddJob(p)  # show in 'jobs' list
        return 0
426
    def RunPipeline(self, node, status_out):
        # type: (command.Pipeline, CommandStatus) -> None
        """Run a foreground pipeline; the LAST part runs in THIS process.

        Per-part exit statuses and blame locations are written into
        status_out (pipe_status / pipe_locs).
        """
        pi = process.Pipeline(self.exec_opts.sigpipe_status_ok(),
                              self.job_control, self.job_list, self.tracer)

        # initialized with CommandStatus.CreateNull()
        pipe_locs = []  # type: List[loc_t]

        # First n-1 processes (which is empty when n == 1)
        n = len(node.children)
        for i in xrange(n - 1):
            child = node.children[i]

            # TODO: determine these locations at parse time?
            pipe_locs.append(loc.Command(child))

            p = self._MakeProcess(child, True, self.exec_opts.errtrace())
            p.Init_ParentPipeline(pi)
            pi.Add(p)

        last_child = node.children[n - 1]
        # Last piece of code is in THIS PROCESS. 'echo foo | read line; echo $line'
        pi.AddLast((self.cmd_ev, last_child))
        pipe_locs.append(loc.Command(last_child))

        with dev.ctx_Tracer(self.tracer, 'pipeline', None):
            pi.StartPipeline(self.waiter)
            # Hand the pipeline to ourselves so any process forked by the last
            # part joins its process group (consumed in RunSimpleCommand).
            self.fg_pipeline = pi
            status_out.pipe_status = pi.RunLastPart(self.waiter, self.fd_state)
            self.fg_pipeline = None  # clear in case we didn't end up forking

        status_out.pipe_locs = pipe_locs
460
461 def RunSubshell(self, node):
462 # type: (command_t) -> int
463 p = self._MakeProcess(node, True, self.exec_opts.errtrace())
464 if self.job_control.Enabled():
465 p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))
466
467 return p.RunProcess(self.waiter, trace.ForkWait)
468
    def RunCommandSub(self, cs_part):
        # type: (CommandSub) -> str
        """Run $(...): fork, capture the child's stdout via a pipe, and return
        it with trailing newlines stripped."""

        if not self.exec_opts._allow_command_sub():
            # _allow_command_sub is used in two places. Only one of them turns off _allow_process_sub
            if not self.exec_opts._allow_process_sub():
                why = "status wouldn't be checked (strict_errexit)"
            else:
                why = 'eval_unsafe_arith is off'

            e_die("Command subs not allowed here because %s" % why,
                  loc.WordPart(cs_part))

        node = cs_part.child

        # Hack for weird $(<file) construct
        if node.tag() == command_e.Redirect:
            redir_node = cast(command.Redirect, node)
            # Detect '< file'
            if (len(redir_node.redirects) == 1 and
                    redir_node.redirects[0].op.id == Id.Redir_Less and
                    redir_node.child.tag() == command_e.NoOp):

                # Change it to __cat < file.
                # TODO: could be 'internal cat' (issue #1013)
                tok = lexer.DummyToken(Id.Lit_Chars, '__cat')
                cat_word = CompoundWord([tok])

                # Blame < because __cat has no location
                blame_tok = redir_node.redirects[0].op
                simple = command.Simple(blame_tok, [], [cat_word], None, None,
                                        True)

                # MUTATE redir node so it's like $(__cat < file)
                redir_node.child = simple

        p = self._MakeProcess(node, self.exec_opts.inherit_errexit(),
                              self.exec_opts.errtrace())
        # Shell quirk: Command subs remain part of the shell's process group, so we
        # don't use p.AddStateChange(process.SetPgid(...))

        r, w = posix.pipe()
        p.AddStateChange(process.StdoutToPipe(r, w))

        p.StartProcess(trace.CommandSub)
        #log('Command sub started %d', pid)

        chunks = []  # type: List[str]
        posix.close(w)  # not going to write
        # Read the child's entire output, retrying on EINTR.
        while True:
            n, err_num = pyos.Read(r, 4096, chunks)

            if n < 0:
                if err_num == EINTR:
                    pass  # retry
                else:
                    # Like the top level IOError handler
                    e_die_status(
                        2,
                        'osh I/O error (read): %s' % posix.strerror(err_num))

            elif n == 0:  # EOF
                break
        posix.close(r)

        status = p.Wait(self.waiter)

        # OSH has the concept of aborting in the middle of a WORD. We're not
        # waiting until the command is over!
        if self.exec_opts.command_sub_errexit():
            if status != 0:
                msg = 'Command Sub exited with status %d' % status
                raise error.ErrExit(status, msg, loc.WordPart(cs_part))

        else:
            # Set a flag so we check errexit at the same time as bash. Example:
            #
            # a=$(false)
            # echo foo # no matter what comes here, the flag is reset
            #
            # Set ONLY until this command node has finished executing.

            # HACK: move this
            self.cmd_ev.check_command_sub_status = True
            self.mem.SetLastStatus(status)

        # Runtime errors test case: # $("echo foo > $@")
        # Why rstrip()?
        # https://unix.stackexchange.com/questions/17747/why-does-shell-command-substitution-gobble-up-a-trailing-newline-char
        return ''.join(chunks).rstrip('\n')
558
    def RunProcessSub(self, cs_part):
        # type: (CommandSub) -> str
        """Process sub forks a process connected to a pipe.

        The pipe is typically passed to another process via a /dev/fd/$FD path.

        Life cycle of a process substitution:

        1. Start with this code

          diff <(seq 3) <(seq 4)

        2. To evaluate the command line, we evaluate every word. The
        NormalWordEvaluator calls this method, RunProcessSub(), which does
        these things:

          a. Create a pipe(), getting r and w
          b. Starts the seq process, which inherits r and w
             It has a StdoutToPipe() redirect, which means that it dup2(w, 1)
             and close(r)
          c. Close the w FD, because neither the shell or 'diff' will write to it.
             However we must retain 'r', because 'diff' hasn't opened /dev/fd yet!
          d. We evaluate <(seq 3) to /dev/fd/$r, so "diff" can read from it

        3. Now we're done evaluating every word, so we know the command line of
        diff, which looks like

          diff /dev/fd/64 /dev/fd/65

        Those are the FDs for the read ends of the pipes we created.

        4. diff inherits a copy of the read end of both pipes. But it actually
        calls open() on both files passed as argv. (I think this is fine.)

        5. wait() for the diff process.

        6. The shell closes both the read ends of both pipes. Neither we nor
        'diff' will read again.

        7. The shell waits for both 'seq' processes.

        Related:
          shopt -s process_sub_fail
          _process_sub_status
        """
        cs_loc = loc.WordPart(cs_part)

        if not self.exec_opts._allow_process_sub():
            e_die(
                "Process subs not allowed here because status wouldn't be checked (strict_errexit)",
                cs_loc)

        p = self._MakeProcess(cs_part.child, True, self.exec_opts.errtrace())

        r, w = posix.pipe()
        #log('pipe = %d, %d', r, w)

        op_id = cs_part.left_token.id
        if op_id == Id.Left_ProcSubIn:
            # Example: cat < <(head foo.txt)
            #
            # The head process should write its stdout to a pipe.
            redir = process.StdoutToPipe(r, w)  # type: process.ChildStateChange

        elif op_id == Id.Left_ProcSubOut:
            # Example: head foo.txt > >(tac)
            #
            # The tac process should read its stdin from a pipe.

            # Note: this example sometimes requires you to hit "enter" in bash and
            # zsh. Why?
            redir = process.StdinFromPipe(r, w)

        else:
            raise AssertionError()

        p.AddStateChange(redir)

        if self.job_control.Enabled():
            p.AddStateChange(process.SetPgid(process.OWN_LEADER, self.tracer))

        # Fork, letting the child inherit the pipe file descriptors.
        p.StartProcess(trace.ProcessSub)

        ps_frame = self.process_sub_stack[-1]

        # Note: bash never waits() on the process, but zsh does. The calling
        # program needs to read() before we can wait, e.g.
        #   diff <(sort left.txt) <(sort right.txt)

        # After forking, close the end of the pipe we're not using.
        if op_id == Id.Left_ProcSubIn:
            posix.close(w)  # cat < <(head foo.txt)
            ps_frame.Append(p, r, cs_loc)  # close later
        elif op_id == Id.Left_ProcSubOut:
            posix.close(r)
            #log('Left_ProcSubOut closed %d', r)
            ps_frame.Append(p, w, cs_loc)  # close later
        else:
            raise AssertionError()

        # Is /dev Linux-specific?
        if op_id == Id.Left_ProcSubIn:
            return '/dev/fd/%d' % r

        elif op_id == Id.Left_ProcSubOut:
            return '/dev/fd/%d' % w

        else:
            raise AssertionError()
669
670 def PushRedirects(self, redirects, err_out):
671 # type: (List[RedirValue], List[error.IOError_OSError]) -> None
672 if len(redirects) == 0: # Optimized to avoid allocs
673 return
674 self.fd_state.Push(redirects, err_out)
675
676 def PopRedirects(self, num_redirects, err_out):
677 # type: (int, List[error.IOError_OSError]) -> None
678 if num_redirects == 0: # Optimized to avoid allocs
679 return
680 self.fd_state.Pop(err_out)
681
682 def PushProcessSub(self):
683 # type: () -> None
684 if len(self.clean_frame_pool):
685 # Optimized to avoid allocs
686 new_frame = self.clean_frame_pool.pop()
687 else:
688 new_frame = _ProcessSubFrame()
689 self.process_sub_stack.append(new_frame)
690
    def PopProcessSub(self, compound_st):
        # type: (StatusArray) -> None
        """This method is called by a context manager, which means we always
        wait() on the way out, which I think is the right thing.

        We don't always set _process_sub_status, e.g. if some fatal
        error occurs first, but we always wait.
        """
        frame = self.process_sub_stack.pop()
        if frame.WasModified():
            frame.MaybeWaitOnProcessSubs(self.waiter, compound_st)
        else:
            # Optimized to avoid allocs; only unmodified frames are pooled
            self.clean_frame_pool.append(frame)

        # Note: the 3 lists in _ProcessSubFrame are hot in our profiles. It would
        # be nice to somehow "destroy" them here, rather than letting them become
        # garbage that needs to be traced.

        # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
        # Pop(), and Top() of VALUES rather than GC objects?
709
710 # The CommandEvaluator could have a ProcessSubStack, which supports Push(),
711 # Pop(), and Top() of VALUES rather than GC objects?
712
713
714# vim: sw=4