#!/usr/bin/env python2
"""
builtin_process.py - Builtins that deal with processes or modify process state.

This is sort of the opposite of builtin_pure.py.
"""
from __future__ import print_function

import resource
from resource import (RLIM_INFINITY, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA,
                      RLIMIT_FSIZE, RLIMIT_NOFILE, RLIMIT_STACK, RLIMIT_AS)
from signal import SIGCONT

from _devbuild.gen import arg_types
from _devbuild.gen.syntax_asdl import loc
from _devbuild.gen.runtime_asdl import (cmd_value, job_state_e, wait_status,
                                        wait_status_e)
from core import dev
from core import error
from core.error import e_usage, e_die_status
from core import process  # W1_OK, W1_ECHILD
from core import pyos
from core import pyutil
from core import vm
from frontend import flag_util
from frontend import typed_args
from mycpp import mops
from mycpp import mylib
from mycpp.mylib import log, tagswitch, print_stderr

import posix_ as posix

from typing import TYPE_CHECKING, List, Tuple, Optional, cast
if TYPE_CHECKING:
    from core.process import Waiter, ExternalProgram, FdState
    from core.state import Mem, SearchPath
    from display import ui

_ = log


class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        if arg.l:
            style = process.STYLE_LONG
        elif arg.p:
            style = process.STYLE_PID_ONLY
        else:
            style = process.STYLE_DEFAULT

        self.job_list.DisplayJobs(style)

        if arg.debug:
            self.job_list.DebugPrint()

        return 0


class Fg(vm._Builtin):
    """Put a job in the foreground."""

    def __init__(self, job_control, job_list, waiter):
        # type: (process.JobControl, process.JobList, Waiter) -> None
        self.job_control = job_control
        self.job_list = job_list
        self.waiter = waiter

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        job_spec = ''  # get current job by default
        if len(cmd_val.argv) > 1:
            job_spec = cmd_val.argv[1]

        job = self.job_list.GetJobWithSpec(job_spec)
        if job is None:
            print_stderr('fg: No job to put in the foreground')
            return 1

        pgid = job.ProcessGroupId()
        assert pgid != process.INVALID_PGID, \
            'Processes put in the background should have a PGID'

        # TODO: Print job ID rather than the PID
        print_stderr('fg: PID %d Continued' % pgid)
        # Put the job's process group back into the foreground.  GiveTerminal()
        # must be called before sending SIGCONT, or else the process might
        # immediately get suspended again if it tries to read/write on the
        # terminal.
        self.job_control.MaybeGiveTerminal(pgid)
        job.SetForeground()
        # needed for Wait() loop to work
        job.state = job_state_e.Running
        posix.killpg(pgid, SIGCONT)

        status = -1
        wait_st = job.JobWait(self.waiter)
        UP_wait_st = wait_st
        with tagswitch(wait_st) as case:
            if case(wait_status_e.Proc):
                wait_st = cast(wait_status.Proc, UP_wait_st)
                status = wait_st.code

            elif case(wait_status_e.Pipeline):
                wait_st = cast(wait_status.Pipeline, UP_wait_st)
                # TODO: handle PIPESTATUS?  Is this right?
                status = wait_st.codes[-1]

            elif case(wait_status_e.Cancelled):
                wait_st = cast(wait_status.Cancelled, UP_wait_st)
                status = 128 + wait_st.sig_num
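                # 128 + N is the conventional shell exit status for a job
                # killed or cancelled by signal N (e.g. SIGINT=2 gives 130).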

            else:
                raise AssertionError()

        return status


class Bg(vm._Builtin):
    """Put a job in the background."""

    def __init__(self, job_list):
        # type: (process.JobList) -> None
        self.job_list = job_list

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        # How does this differ from 'fg'?  It doesn't wait and it sets controlling
        # terminal?

        raise error.Usage("isn't implemented", loc.Missing)


class Fork(vm._Builtin):
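    # Runs a block as a background job; illustrative YSH usage (assumed, not
    # taken from this file):
    #   fork { sleep 1 }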

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('fork',
                                         cmd_val,
                                         accept_typed_args=True)

        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd = typed_args.OptionalBlock(cmd_val)
        if cmd is None:
            e_usage('expected a block', loc.Missing)

        return self.shell_ex.RunBackgroundJob(cmd)


class ForkWait(vm._Builtin):
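    # Runs a block in a forked child and waits for it, like a subshell;
    # illustrative YSH usage (assumed, not taken from this file):
    #   forkwait { cd /tmp; echo $PWD }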

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('forkwait',
                                         cmd_val,
                                         accept_typed_args=True)
        arg, location = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, location)

        cmd = typed_args.OptionalBlock(cmd_val)
        if cmd is None:
            e_usage('expected a block', loc.Missing)

        return self.shell_ex.RunSubshell(cmd)


class Exec(vm._Builtin):
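    # Two modes, as in other POSIX shells (shell examples for illustration):
    #   exec 3< input.txt     # no args: redirects become permanent in this shell
    #   exec python3 foo.py   # with args: replace the shell process, never return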

    def __init__(self, mem, ext_prog, fd_state, search_path, errfmt):
        # type: (Mem, ExternalProgram, FdState, SearchPath, ui.ErrorFormatter) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        _, arg_r = flag_util.ParseCmdVal('exec', cmd_val)

        # Apply redirects in this shell.  NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetExported()
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            e_die_status(127, 'exec: %r not found' % cmd, cmd_val.arg_locs[1])

        # shift off 'exec', and remove typed args because they don't apply
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_locs[i:],
                            cmd_val.is_last_cmd, None)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        # makes mypy and C++ compiler happy
        raise AssertionError('unreachable')


class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]
        Wait for job completion and return exit status.

        Waits for each process identified by an ID, which may be a process ID
        or a job specification, and reports its termination status.  If ID is
        not given, waits for all currently active child processes, and the
        return status is zero.  If ID is a job specification, waits for all
        processes in that job's pipeline.

        If the -n option is supplied, waits for the next job to terminate and
        returns its exit status.

    Exit Status:
        Returns the status of the last ID; fails if ID is invalid or an
        invalid option is given.
    """

    def __init__(self, waiter, job_list, mem, tracer, errfmt):
        # type: (Waiter, process.JobList, Mem, dev.Tracer, ui.ErrorFormatter) -> None
        self.waiter = waiter
        self.job_list = job_list
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int
        attrs, arg_r = flag_util.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_locs = arg_r.Rest2()

        if arg.n:
            # Loop until there is one fewer process running, there's nothing to wait
            # for, or there's a signal
            n = self.job_list.NumRunning()
            if n == 0:
                status = 127
            else:
                target = n - 1
                status = 0
                while self.job_list.NumRunning() > target:
                    result = self.waiter.WaitForOne()
                    if result == process.W1_OK:
                        status = self.waiter.last_status
                    elif result == process.W1_ECHILD:
                        # nothing to wait for, or interrupted
                        status = 127
                        break
                    elif result >= 0:  # signal
                        status = 128 + result
                        break

            return status

        if len(job_ids) == 0:
            #log('*** wait')

            # BUG: If there is a STOPPED process, this will hang forever, because we
            # don't get ECHILD.  Not sure it matters since you can now Ctrl-C it.
            # But how to fix this?

            status = 0
            while self.job_list.NumRunning() != 0:
                result = self.waiter.WaitForOne()
                if result == process.W1_ECHILD:
                    # nothing to wait for, or interrupted.  status is 0
                    break
                elif result >= 0:  # signal
                    status = 128 + result
                    break

            return status

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the exit
        # code of last one to FINISH.
        jobs = []  # type: List[process.Job]
        for i, job_id in enumerate(job_ids):
            location = arg_locs[i]

            job = None  # type: Optional[process.Job]
            if job_id == '' or job_id.startswith('%'):
                job = self.job_list.GetJobWithSpec(job_id)

            if job is None:
                # Does it look like a PID?
                try:
                    pid = int(job_id)
                except ValueError:
                    raise error.Usage(
                        'expected PID or jobspec, got %r' % job_id, location)

                job = self.job_list.ProcessFromPid(pid)

            if job is None:
                self.errfmt.Print_("%s isn't a child of this shell" % job_id,
                                   blame_loc=location)
                return 127

            jobs.append(job)

        status = 1  # error
        for job in jobs:
            wait_st = job.JobWait(self.waiter)
            UP_wait_st = wait_st
            with tagswitch(wait_st) as case:
                if case(wait_status_e.Proc):
                    wait_st = cast(wait_status.Proc, UP_wait_st)
                    status = wait_st.code

                elif case(wait_status_e.Pipeline):
                    wait_st = cast(wait_status.Pipeline, UP_wait_st)
                    # TODO: handle PIPESTATUS?  Is this right?
                    status = wait_st.codes[-1]

                elif case(wait_status_e.Cancelled):
                    wait_st = cast(wait_status.Cancelled, UP_wait_st)
                    status = 128 + wait_st.sig_num

                else:
                    raise AssertionError()

        return status


class Umask(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        argv = cmd_val.argv[1:]
        if len(argv) == 0:
            # umask() has a dumb API: you can't get it without modifying it first!
            # NOTE: dash disables interrupts around the two umask() calls, but that
            # shouldn't be a concern for us.  Signal handlers won't call umask().
            mask = posix.umask(0)
            posix.umask(mask)  # restore it
            print('0%03o' % mask)  # octal format
            return 0

        if len(argv) == 1:
            a = argv[0]
            try:
                new_mask = int(a, 8)
            except ValueError:
                # NOTE: This also happens when we have '8' or '9' in the input.
                print_stderr(
                    "oils warning: umask with symbolic input isn't implemented"
                )
                return 1

            posix.umask(new_mask)
            return 0

        e_usage('umask: unexpected arguments', loc.Missing)
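
    # Example of the numeric form above (illustrative): 'umask 022' parses as
    # int('022', 8) == 18, so files created afterward get mode 666 & ~022 = 644.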


def _LimitString(lim, factor):
    # type: (mops.BigInt, int) -> str
    if mops.Equal(lim, mops.FromC(RLIM_INFINITY)):
        return 'unlimited'
    else:
        i = mops.Div(lim, mops.IntWiden(factor))
        return mops.ToStr(i)
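
# Illustrative example (not from the original source): with factor=1024, a
# soft RLIMIT_STACK of 8388608 bytes is rendered by _LimitString() as '8192',
# i.e. the value is reported in KiB; RLIM_INFINITY becomes 'unlimited'.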


class Ulimit(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""

        self._table = None  # type: List[Tuple[str, int, int, str]]

    def _Table(self):
        # type: () -> List[Tuple[str, int, int, str]]

        # POSIX 2018
        #
        # https://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html
        if self._table is None:
            # This table matches _ULIMIT_RESOURCES in frontend/flag_def.py

            # flag, RLIMIT_X, factor, description
            self._table = [
                # Following POSIX and most shells except bash, -f is in
                # blocks of 512 bytes
                ('-c', RLIMIT_CORE, 512, 'core dump size'),
                ('-d', RLIMIT_DATA, 1024, 'data segment size'),
                ('-f', RLIMIT_FSIZE, 512, 'file size'),
                ('-n', RLIMIT_NOFILE, 1, 'file descriptors'),
                ('-s', RLIMIT_STACK, 1024, 'stack size'),
                ('-t', RLIMIT_CPU, 1, 'CPU seconds'),
                ('-v', RLIMIT_AS, 1024, 'address space size'),
            ]

        return self._table
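
    # For example (illustrative): 'ulimit -s' prints RLIMIT_STACK divided by
    # 1024, and 'ulimit -f 100' sets RLIMIT_FSIZE to 100 * 512 bytes.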

    def _FindFactor(self, what):
        # type: (int) -> int
        for _, w, factor, _ in self._Table():
            if w == what:
                return factor
        raise AssertionError()

    def Run(self, cmd_val):
        # type: (cmd_value.Argv) -> int

        attrs, arg_r = flag_util.ParseCmdVal('ulimit', cmd_val)
        arg = arg_types.ulimit(attrs.attrs)

        what = 0
        num_what_flags = 0

        if arg.c:
            what = RLIMIT_CORE
            num_what_flags += 1

        if arg.d:
            what = RLIMIT_DATA
            num_what_flags += 1

        if arg.f:
            what = RLIMIT_FSIZE
            num_what_flags += 1

        if arg.n:
            what = RLIMIT_NOFILE
            num_what_flags += 1

        if arg.s:
            what = RLIMIT_STACK
            num_what_flags += 1

        if arg.t:
            what = RLIMIT_CPU
            num_what_flags += 1

        if arg.v:
            what = RLIMIT_AS
            num_what_flags += 1

        if num_what_flags > 1:
            raise error.Usage(
                'can only handle one resource at a time; got too many flags',
                cmd_val.arg_locs[0])

        # Print all
        show_all = arg.a or arg.all
        if show_all:
            if num_what_flags > 0:
                raise error.Usage("doesn't accept resource flags with -a",
                                  cmd_val.arg_locs[0])

            extra, extra_loc = arg_r.Peek2()
            if extra is not None:
                raise error.Usage('got extra arg with -a', extra_loc)

            # Worst case 20 == len(str(2**64))
            fmt = '%5s %15s %15s %7s %s'
            print(fmt % ('FLAG', 'SOFT', 'HARD', 'FACTOR', 'DESC'))
            for flag, what, factor, desc in self._Table():
                soft, hard = pyos.GetRLimit(what)

                soft2 = _LimitString(soft, factor)
                hard2 = _LimitString(hard, factor)
                print(fmt % (flag, soft2, hard2, str(factor), desc))

            return 0

        if num_what_flags == 0:
            what = RLIMIT_FSIZE  # -f is the default

        s, s_loc = arg_r.Peek2()

        if s is None:
            factor = self._FindFactor(what)
            soft, hard = pyos.GetRLimit(what)
            if arg.H:
                print(_LimitString(hard, factor))
            else:
                print(_LimitString(soft, factor))
            return 0

        # Set the given resource
        if s == 'unlimited':
            # In C, RLIM_INFINITY is rlim_t
            limit = mops.FromC(RLIM_INFINITY)
        else:
            try:
                big_int = mops.FromStr(s)
            except ValueError as e:
                raise error.Usage(
                    "expected a number or 'unlimited', got %r" % s, s_loc)

            if mops.Greater(mops.IntWiden(0), big_int):
                raise error.Usage(
                    "doesn't accept negative numbers, got %r" % s, s_loc)

            factor = self._FindFactor(what)

            fac = mops.IntWiden(factor)
            limit = mops.Mul(big_int, fac)

            # Overflow check like bash does
            # TODO: This should be replaced with a different overflow check
            # when we have arbitrary precision integers
            if not mops.Equal(mops.Div(limit, fac), big_int):
                #log('div %s', mops.ToStr(mops.Div(limit, fac)))
                raise error.Usage(
                    'detected integer overflow: %s' % mops.ToStr(big_int),
                    s_loc)
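            # Illustrative example of the check above: with 64-bit wraparound,
            # big_int = 2**60 and factor = 1024 would overflow, and
            # limit / factor would no longer equal big_int.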

        arg_r.Next()
        extra2, extra_loc2 = arg_r.Peek2()
        if extra2 is not None:
            raise error.Usage('got extra arg', extra_loc2)

        # Now set the resource
        soft, hard = pyos.GetRLimit(what)

        # For error message
        old_soft = soft
        old_hard = hard

        # Bash behavior: manipulate both, unless a flag is parsed.  This
        # differs from zsh!
        if not arg.S and not arg.H:
            soft = limit
            hard = limit
        if arg.S:
            soft = limit
        if arg.H:
            hard = limit
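        # For example (illustrative): 'ulimit -n 1024' changes both the soft
        # and hard NOFILE limits, while 'ulimit -S -n 1024' lowers only the
        # soft limit.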

        if mylib.PYTHON:
            try:
                pyos.SetRLimit(what, soft, hard)
            except OverflowError:  # only happens in CPython
                raise error.Usage('detected overflow', s_loc)
            except (ValueError, resource.error) as e:
                # Annoying: Python binding changes IOError -> ValueError

                print_stderr('oils: ulimit error: %s' % e)

                # Extra info we could expose in C++ too
                print_stderr('soft=%s hard=%s -> soft=%s hard=%s' % (
                    _LimitString(old_soft, factor),
                    _LimitString(old_hard, factor),
                    _LimitString(soft, factor),
                    _LimitString(hard, factor),
                ))
                return 1
        else:
            try:
                pyos.SetRLimit(what, soft, hard)
            except (IOError, OSError) as e:
                print_stderr('oils: ulimit error: %s' % pyutil.strerror(e))
                return 1

        return 0


# vim: sw=4