#!/usr/bin/env bash
#
# Test scripts found in the wild for both correctness and performance.
#
# Usage:
#   benchmarks/osh-runtime.sh <function name>
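#
# Any function below can be run via the "$@" dispatch at the bottom of this
# file, e.g.:
#
#   benchmarks/osh-runtime.sh download
#   benchmarks/osh-runtime.sh extract
#   benchmarks/osh-runtime.sh soil-run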

set -o nounset
set -o pipefail
set -o errexit

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)

source benchmarks/common.sh  # tsv-concat
source benchmarks/id.sh  # print-job-id
source soil/common.sh  # find-dir-html
source test/common.sh
source test/tsv-lib.sh  # tsv-row

readonly BASE_DIR=_tmp/osh-runtime

# TODO: Move to ../oil_DEPS
readonly TAR_DIR=$PWD/_deps/osh-runtime  # Make it absolute

#
# Dependencies
#

readonly PY27_DIR=$PWD/Python-2.7.13

# NOTE: Same list in oilshell.org/blob/run.sh.
tarballs() {
  cat <<EOF
tcc-0.9.26.tar.bz2
yash-2.46.tar.xz
ocaml-4.06.0.tar.xz
util-linux-2.40.tar.xz
EOF
}

download() {
  mkdir -p $TAR_DIR
  tarballs | xargs -n 1 -I {} --verbose -- \
    wget --no-clobber --directory $TAR_DIR 'https://www.oilshell.org/blob/testdata/{}'
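  # For the first tarball in the list, the xargs template above expands to
  # roughly:
  #
  #   wget --no-clobber --directory $TAR_DIR \
  #     'https://www.oilshell.org/blob/testdata/tcc-0.9.26.tar.bz2'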
}

extract() {
  set -x
  time for f in $TAR_DIR/*.{bz2,xz}; do
    tar -x --directory $TAR_DIR --file $f
  done
  set +x

  ls -l $TAR_DIR
}

#
# Computation
#

run-tasks() {
  local raw_out_dir=$1
  raw_out_dir="$PWD/$raw_out_dir"  # because we change dirs

  local task_id=0
  while read -r host_name sh_path workload; do

    log "*** $host_name $sh_path $workload $task_id"

    local sh_run_path
    case $sh_path in
      /*)  # Already absolute
        sh_run_path=$sh_path
        ;;
      */*)  # It's relative, so make it absolute
        sh_run_path=$PWD/$sh_path
        ;;
      *)  # 'dash' should remain 'dash'
        sh_run_path=$sh_path
        ;;
    esac
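    # For example, bin/osh becomes $PWD/bin/osh, while bare names like 'dash'
    # and absolute paths are used as-is.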

    local working_dir=''
    local files_out_dir="$raw_out_dir/files-$task_id"
    mkdir -v -p $files_out_dir

    local save_new_files=''

    local -a argv
    case $workload in
      hello-world)
        argv=( testdata/osh-runtime/hello_world.sh )
        ;;

      bin-true)
        argv=( testdata/osh-runtime/bin_true.sh )
        ;;

      abuild-print-help)
        argv=( testdata/osh-runtime/abuild -h )
        ;;

      configure.cpython)
        argv=( $PY27_DIR/configure )
        working_dir=$files_out_dir
        ;;

      configure.util-linux)
        # flag needed to avoid sqlite3 dep error message
        argv=( $TAR_DIR/util-linux-2.40/configure --disable-liblastlog2 )
        working_dir=$files_out_dir
        ;;

      configure.*)
        argv=( ./configure )

        local conf_dir
        case $workload in
          *.ocaml)
            conf_dir='ocaml-4.06.0'
            ;;
          *.tcc)
            conf_dir='tcc-0.9.26'
            ;;
          *.yash)
            conf_dir='yash-2.46'
            ;;
          *)
            die "Invalid workload $workload"
        esac

        # These are run in-tree?
        working_dir=$TAR_DIR/$conf_dir
        ;;

      *)
        die "Invalid workload $workload"
        ;;
    esac

    local -a time_argv=(
      time-tsv
      --output "$raw_out_dir/times.tsv" --append
      --rusage
      --rusage-2
      --field "$task_id"
      --field "$host_name" --field "$sh_path"
      --field "$workload"
      -- "$sh_run_path" "${argv[@]}"
    )
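    # For task 0 running hello-world under bash, this array expands to
    # something like:
    #
    #   time-tsv --output "$raw_out_dir/times.tsv" --append --rusage --rusage-2 \
    #     --field 0 --field no-host --field bash --field hello-world \
    #     -- bash testdata/osh-runtime/hello_world.sh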

    local stdout_file="$files_out_dir/STDOUT.txt"
    local gc_stats_file="$raw_out_dir/gc-$task_id.txt"

    # Maybe change dirs
    if test -n "$working_dir"; then
      pushd "$working_dir"
    fi

    if test -n "$save_new_files"; then
      touch __TIMESTAMP
    fi

    # Run it, possibly with GC stats
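    #
    # OILS_GC_STATS_FD makes osh write its GC stats to the given file
    # descriptor; fd 99 is redirected to gc-$task_id.txt, which
    # benchmarks/gc_stats_to_tsv.py aggregates later.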
    case $sh_path in
      *_bin/*/osh)
        OILS_GC_STATS_FD=99 "${time_argv[@]}" > $stdout_file 99> $gc_stats_file
        ;;
      *)
        "${time_argv[@]}" > $stdout_file
        ;;
    esac

    if test -n "$save_new_files"; then
      echo "COPYING to $files_out_dir"
      find . -type f -newer __TIMESTAMP \
        | xargs -I {} -- cp --verbose {} $files_out_dir
    fi

    # Restore dir
    if test -n "$working_dir"; then
      popd
    fi

    task_id=$((task_id + 1))
  done
}

# Sorted by priority for test-oils.sh osh-runtime --num-shells 3

readonly -a ALL_WORKLOADS=(
  hello-world
  bin-true

  configure.cpython
  configure.util-linux
  configure.ocaml
  configure.tcc
  configure.yash

  abuild-print-help
)

print-tasks() {
  local host_name=$1
  local osh_native=$2

  if test -n "${QUICKLY:-}"; then
    workloads=(
      hello-world
      bin-true
      #configure.util-linux
      #abuild-print-help
    )
  else
    workloads=( "${ALL_WORKLOADS[@]}" )
  fi

  for sh_path in bash dash bin/osh $osh_native; do
    for workload in "${workloads[@]}"; do
      tsv-row $host_name $sh_path $workload
    done
  done
}
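
# For illustration: each task row printed above is tab-separated, e.g.
#
#   no-host   bash    hello-world
#
# run-tasks reads these rows from stdin (see run-tasks-wrapper below).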

print-tasks-xshar() {
  local host_name=$1
  local osh_native=$2

  local num_iters=${3:-1}
  local num_shells=${4:-1}
  local num_workloads=${5:-1}

  local s=0
  local w=0

  for i in $(seq $num_iters); do

    for sh_path in $osh_native bash dash; do

      for workload in "${ALL_WORKLOADS[@]}"; do
        tsv-row $host_name $sh_path $workload

        w=$(( w + 1 ))  # cut off at the specified number of workloads
        if test $w -eq $num_workloads; then
          break
        fi
      done

      s=$(( s + 1 ))  # cut off at the specified number of shells
      if test $s -eq $num_shells; then
        break
      fi

    done
  done
}

run-tasks-wrapper() {
  ### reads tasks from stdin

  local host_name=$1  # 'no-host' or 'lenny'
  local raw_out_dir=$2

  mkdir -v -p $raw_out_dir

  local tsv_out="$raw_out_dir/times.tsv"

  # Write the header of the TSV file that is appended to.
  time-tsv -o $tsv_out --print-header \
    --rusage \
    --rusage-2 \
    --field task_id \
    --field host_name --field sh_path \
    --field workload
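
  # The resulting times.tsv starts with the timing / rusage columns that
  # time-tsv emits, followed by the --field columns above:
  # task_id, host_name, sh_path, workload.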

  # reads tasks from stdin
  # run-tasks outputs 3 things: raw times.tsv, per-task STDOUT and files, and
  # per-task GC stats
  run-tasks $raw_out_dir

  # Turn individual files into a TSV, adding host
  benchmarks/gc_stats_to_tsv.py $raw_out_dir/gc-*.txt \
    | tsv-add-const-column host_name "$host_name" \
    > $raw_out_dir/gc_stats.tsv

  cp -v _tmp/provenance.tsv $raw_out_dir
}

measure() {
  ### For release and CI
  local host_name=$1  # 'no-host' or 'lenny'
  local raw_out_dir=$2  # _tmp/osh-runtime or ../../benchmark-data/osh-runtime
  local osh_native=$3  # $OSH_CPP_NINJA_BUILD or $OSH_CPP_BENCHMARK_DATA

  print-tasks $host_name $osh_native | run-tasks-wrapper $host_name $raw_out_dir
}

stage1() {
  local base_dir=${1:-$BASE_DIR}  # _tmp/osh-runtime or ../benchmark-data/osh-runtime
  local single_machine=${2:-}

  local out_dir=$BASE_DIR/stage1  # _tmp/osh-runtime/stage1
  mkdir -p $out_dir

  # Globs are in lexicographical order, which works for our dates.
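  # For example, a run directory looks like raw.no-host.2024-05-01__10-11-12,
  # so ${a[-1]} below picks the most recent one.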

  local -a raw_times=()
  local -a raw_gc_stats=()
  local -a raw_provenance=()

  if test -n "$single_machine"; then
    local -a a=( $base_dir/raw.$single_machine.* )

    raw_times+=( ${a[-1]}/times.tsv )
    raw_gc_stats+=( ${a[-1]}/gc_stats.tsv )
    raw_provenance+=( ${a[-1]}/provenance.tsv )

  else
    local -a a=( $base_dir/raw.$MACHINE1.* )
    local -a b=( $base_dir/raw.$MACHINE2.* )

    raw_times+=( ${a[-1]}/times.tsv ${b[-1]}/times.tsv )
    raw_gc_stats+=( ${a[-1]}/gc_stats.tsv ${b[-1]}/gc_stats.tsv )
    raw_provenance+=( ${a[-1]}/provenance.tsv ${b[-1]}/provenance.tsv )
  fi

  tsv-concat "${raw_times[@]}" > $out_dir/times.tsv

  tsv-concat "${raw_gc_stats[@]}" > $out_dir/gc_stats.tsv

  tsv-concat "${raw_provenance[@]}" > $out_dir/provenance.tsv
}

print-report() {
  local in_dir=$1

  benchmark-html-head 'OSH Runtime Performance'

  cat <<EOF
<body class="width60">
<p id="home-link">
<a href="/">oilshell.org</a>
</p>
EOF

  cmark <<'EOF'
## OSH Runtime Performance

Source code: [benchmarks/osh-runtime.sh](https://github.com/oilshell/oil/tree/master/benchmarks/osh-runtime.sh)

- [Elapsed Time](#elapsed-time)
- [Minor Page Faults](#page-faults)
- [Memory Usage](#memory-usage)
- [GC Stats](#gc-stats)
- [rusage Details](#rusage-details)
- [More Details](#more-details)
- [Shell and Host](#shell-and-host)

<a name="elapsed-time" />

### Elapsed Time by Shell (milliseconds)

Some benchmarks call many external tools, while others exercise the shell
interpreter itself.
EOF
  tsv2html $in_dir/elapsed.tsv

  cmark <<EOF
<a name="page-faults" />

### Minor Page Faults
EOF

  tsv2html $in_dir/page_faults.tsv

  cmark <<EOF
<a name="memory-usage" />

### Memory Usage (Max Resident Set Size in MB)

Memory usage is measured in MB (powers of 10), not MiB (powers of 2).
EOF
  tsv2html $in_dir/max_rss.tsv

  cmark <<EOF
<a name="gc-stats" />

### GC Stats
EOF
  tsv2html $in_dir/gc_stats.tsv

  cmark <<EOF
<a name="rusage-details" />

### rusage Details
EOF
  tsv2html $in_dir/details.tsv

  cmark <<EOF
<a name="more-details" />

### More Details
EOF
  tsv2html $in_dir/details_io.tsv

  cmark <<'EOF'
<a name="shell-and-host" />

### Shell and Host
EOF
  tsv2html $in_dir/shells.tsv
  tsv2html $in_dir/hosts.tsv

  # Only show the files.html link on a single machine
  if test -f $(dirname $in_dir)/files.html; then
    cmark <<'EOF'
---

[raw files](files.html)
EOF
  fi

  cat <<EOF
</body>
</html>
EOF
}

test-oils-run() {
  local osh=$1

  # flags passed by the caller
  local num_iters=${2:-1}
  local num_shells=${3:-1}
  local num_workloads=${4:-1}

  local time_py=$XSHAR_DIR/benchmarks/time_.py
  $time_py --tsv --rusage -- \
    $osh -c 'echo "smoke test: hi from benchmarks/osh-runtime.sh"'

  local host_name
  host_name=$(hostname)

  local job_id
  job_id=$(print-job-id)

  # Write _tmp/provenance.* and _tmp/{host,shell}-id
  shell-provenance-2 \
    $host_name $job_id _tmp \
    bash dash $osh

  # e.g. 2024-05-01__10-11-12.ci-vm-name
  local raw_out_dir="$BASE_DIR/$job_id.$host_name"
  mkdir -p $raw_out_dir $BASE_DIR/stage1

  # Similar to 'measure', for soil-run and release
  print-tasks-xshar $host_name $osh $num_iters $num_shells $num_workloads \
    | run-tasks-wrapper $host_name $raw_out_dir

  # Note: in soil-run, 'stage1' is a trivial concatenation that creates input
  # for benchmarks/report.R.  We don't need that here.
}

soil-run() {
  ### Run it on just this machine, and make a report

  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  # TODO: This testdata should be baked into the Docker image, or mounted
  download
  extract

  # Could add _bin/cxx-bumpleak/oils-for-unix, although it's sometimes slower
  local -a osh_bin=( $OSH_CPP_NINJA_BUILD )
  ninja "${osh_bin[@]}"

  local single_machine='no-host'

  local job_id
  job_id=$(print-job-id)

  # Write _tmp/provenance.* and _tmp/{host,shell}-id
  shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh "${osh_bin[@]}"

  local host_job_id="$single_machine.$job_id"
  local raw_out_dir="$BASE_DIR/raw.$host_job_id"
  mkdir -p $raw_out_dir $BASE_DIR/stage1

  measure $single_machine $raw_out_dir $OSH_CPP_NINJA_BUILD

  # Trivial concatenation for 1 machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR

  # Make _tmp/osh-runtime/files.html, so index.html can potentially link to it
  find-dir-html _tmp/osh-runtime files

  benchmarks/report.sh stage3 $BASE_DIR
}
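
# After soil-run, raw results and the stage1/ TSV files are under
# _tmp/osh-runtime/, and benchmarks/report.sh writes the HTML report there
# (the index.html mentioned above).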

#
# Debugging
#

compare-cpython() {
  #local -a a=( ../benchmark-data/osh-runtime/*.lenny.2024* )
  local -a a=( ../benchmark-data/osh-runtime/*.hoover.2024* )

  # More of a diff here?
  #local -a a=( ../benchmark-data/osh-runtime/*.broome.2023* )
  # Less diff here
  #local -a a=( ../benchmark-data/osh-runtime/*.lenny.2023* )

  local dir=${a[-1]}

  echo $dir

  head -n 1 $dir/times.tsv
  fgrep 'configure.cpython' $dir/times.tsv

  local bash_id=2
  local dash_id=8
  local osh_py_id=14
  local osh_cpp_id=20
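  # These task IDs correspond to the files-$task_id dirs that run-tasks wrote;
  # they may need adjusting for a different times.tsv.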

  set +o errexit

  local out_dir=_tmp/cpython-configure
  mkdir -p $out_dir

  echo 'bash vs. dash'
  diff -u --recursive $dir/{files-2,files-8} > $out_dir/bash-vs-dash.txt
  diffstat $out_dir/bash-vs-dash.txt
  echo

  echo 'bash vs. osh-py'
  diff -u --recursive $dir/{files-2,files-14} > $out_dir/bash-vs-osh-py.txt
  diffstat $out_dir/bash-vs-osh-py.txt
  echo

  echo 'bash vs. osh-cpp'
  diff -u --recursive $dir/{files-2,files-20} > $out_dir/bash-vs-osh-cpp.txt
  diffstat $out_dir/bash-vs-osh-cpp.txt
  echo

  return

  diff -u $dir/{files-2,files-20}/STDOUT.txt
  echo

  diff -u $dir/{files-2,files-20}/pyconfig.h
  echo

  cdiff -u $dir/{files-2,files-20}/config.log
  echo
}

"$@"