#!/usr/bin/env bash
#
# Measure how fast the OSH parser is.
#
# Usage:
#   benchmarks/osh-parser.sh <function name>
#
# Examples:
#   benchmarks/osh-parser.sh soil-run
#   QUICKLY=1 benchmarks/osh-parser.sh soil-run

set -o nounset
set -o pipefail
set -o errexit

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this
readonly REPO_ROOT

source benchmarks/common.sh  # die
source benchmarks/cachegrind.sh  # with-cachegrind
source soil/common.sh  # find-dir-html
source test/tsv-lib.sh  # tsv2html
source test/common.sh  # die

# TODO: The raw files should be published, in both
# ~/git/oilshell/benchmarks-data and the /release/ hierarchy?
readonly BASE_DIR=_tmp/osh-parser
readonly SORTED=$BASE_DIR/tmp/sorted.txt

write-sorted-manifest() {
  local files=${1:-benchmarks/osh-parser-files.txt}
  local counts=$BASE_DIR/tmp/line-counts.txt
  local csv_out=$2
  local sep=${3:-','}  # CSV or TSV

  # Remove comments and sort by line count
  grep -v '^#' $files | xargs wc -l | sort -n > $counts

  # Raw list of paths
  cat $counts | awk '$2 != "total" { print $2 }' > $SORTED

  # Make a CSV file from wc output
  cat $counts | awk -v sep="$sep" '
    BEGIN { print "num_lines" sep "path" }
    $2 != "total" { print $1 sep $2 }' \
    > $csv_out
}

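# For illustration only: the manifest CSV written above has a header row and
# one row per measured file, e.g. (hypothetical line count):
#
#   num_lines,path
#   23330,benchmarks/testdata/configure-coreutils
#
# measure-cachegrind passes sep=$'\t' to get the same data as TSV.
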
# Called by xargs with a task row.
parser-task() {
  local out_dir=$1  # output
  local job_id=$2
  local host=$3
  local host_hash=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- TIME $sh_path $script_path ---"

  local times_out="$out_dir/$host.$job_id.times.csv"

  local shell_name
  shell_name=$(basename $sh_path)

  # Can't use array because of set -u bug!!! Only fixed in bash 4.4.
  extra_args=''
  case "$shell_name" in
    osh|oils-for-unix.*)
      extra_args='--ast-format none'
      ;;
  esac

  # exit code, time in seconds, host_hash, shell_hash, path. \0
  # would have been nice here!
  # TODO: TSV
  benchmarks/time_.py \
    --append \
    --output $times_out \
    --rusage \
    --field "$host" --field "$host_hash" \
    --field "$shell_name" --field "$shell_hash" \
    --field "$script_path" -- \
    "$sh_path" -n $extra_args "$script_path" || echo FAILED
}

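# For illustration only: measure() runs this through xargs, so one task row
# expands to a call like the following (all values hypothetical):
#
#   benchmarks/osh-parser.sh parser-task _tmp/osh-parser/raw \
#     2023-05-01__10-00-00 lenny abc123 bin/osh def456 \
#     benchmarks/testdata/configure-coreutils
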
# Called by xargs with a task row.
# NOTE: This is very similar to the function above, except that we add
# cachegrind. We could probably consolidate these.
cachegrind-task() {
  local out_dir=$1  # output
  local job_id=$2
  local host_name=$3
  local unused2=$4
  local sh_path=$5
  local shell_hash=$6
  local script_path=$7

  echo "--- CACHEGRIND $sh_path $script_path ---"

  local host_job_id="$host_name.$job_id"

  # NOTE: This has to match the path that the header was written to
  local times_out="$out_dir/$host_job_id.cachegrind.tsv"

  local cachegrind_out_dir="$host_job_id.cachegrind"
  mkdir -p $out_dir/$cachegrind_out_dir

  local shell_name
  shell_name=$(basename $sh_path)

  local script_name
  script_name=$(basename $script_path)

  # RELATIVE PATH
  local cachegrind_out_path="${cachegrind_out_dir}/${shell_name}-${shell_hash}__${script_name}.txt"

  # Can't use array because of set -u bug!!! Only fixed in bash 4.4.
  extra_args=''
  case "$shell_name" in
    osh|oils-for-unix.*)
      extra_args="--ast-format none"
      ;;
  esac

  benchmarks/time_.py \
    --tsv \
    --append \
    --output $times_out \
    --rusage \
    --field "$shell_name" --field "$shell_hash" \
    --field "$script_path" \
    --field $cachegrind_out_path \
    -- \
    $0 with-cachegrind $out_dir/$cachegrind_out_path \
    "$sh_path" -n $extra_args "$script_path" || echo FAILED
}

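# Note: '$0 with-cachegrind ...' above works because this script dispatches
# "$@" at the bottom, and with-cachegrind comes from the sourced
# benchmarks/cachegrind.sh. A typical cachegrind_out_path looks roughly like
# this (hypothetical job id and hash):
#
#   no-host.2023-05-01__10-00-00.cachegrind/osh-def456__configure-coreutils.txt
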
# For each shell, print 10 script paths.
print-tasks() {
  local provenance=$1
  shift
  # rest are shells

  # Each provenance row has 5 fields; append the file path as a 6th.
  cat $provenance | filter-provenance "$@" |
  while read fields; do
    if test -n "${QUICKLY:-}"; then
      # Quick test
      head -n 2 $SORTED | xargs -n 1 -- echo "$fields"
    else
      cat $SORTED | xargs -n 1 -- echo "$fields"
    fi
  done
}

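# For illustration: every provenance row (one per shell) is paired with every
# path in $SORTED, so the output has (number of shells) x (number of files)
# task rows. With QUICKLY=1, only the 2 shortest files are used.
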
cachegrind-parse-configure-coreutils() {
  ### Similar to benchmarks/gc, benchmarks/uftrace

  local bin=_bin/cxx-opt/oils-for-unix
  ninja $bin
  local out=_tmp/parse.configure-coreutils.txt

  local -a cmd=(
    $bin --ast-format none -n
    benchmarks/testdata/configure-coreutils )

  time "${cmd[@]}"

  time cachegrind $out "${cmd[@]}"

  echo
  cat $out
}

cachegrind-demo() {
  #local sh=bash
  local sh=zsh

  local out_dir=_tmp/cachegrind

  mkdir -p $out_dir

  # notes:
  # - not passing --trace-children (follow execvpe)
  # - passing --xml=yes gives error: cachegrind doesn't support XML
  # - there is a log out and a details out

  valgrind --tool=cachegrind \
    --log-file=$out_dir/log.txt \
    --cachegrind-out-file=$out_dir/details.txt \
    -- $sh -c 'echo hi'

  echo
  head -n 20 $out_dir/*.txt
}

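# Both helpers above can be run standalone while iterating, e.g.:
#
#   benchmarks/osh-parser.sh cachegrind-parse-configure-coreutils
#   benchmarks/osh-parser.sh cachegrind-demo
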
readonly NUM_TASK_COLS=6  # input columns: 5 from provenance, 1 for file

# Figure out all tasks to run, and run them. When called from auto.sh, $2
# should be the ../benchmarks-data repo.
measure() {
  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  local osh_cpp=${4:-$OSH_CPP_BENCHMARK_DATA}

  local times_out="$out_dir/$host_job_id.times.csv"
  local lines_out="$out_dir/$host_job_id.lines.csv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  # Files that we should measure. Exploded into tasks.
  write-sorted-manifest '' $lines_out

  # Write the header of the CSV file that the tasks append to.
  # TODO: TSV
  benchmarks/time_.py --print-header \
    --rusage \
    --field host_name --field host_hash \
    --field shell_name --field shell_hash \
    --field path \
    > $times_out

  local tasks=$BASE_DIR/tasks.txt
  print-tasks $provenance "${SHELLS[@]}" $osh_cpp > $tasks

  # Run them all
  cat $tasks | xargs -n $NUM_TASK_COLS -- $0 parser-task $out_dir
}

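# For illustration: soil-run invokes the single-machine flavor roughly as
#
#   measure _tmp/provenance.txt "no-host.$job_id" '' $OSH_CPP_NINJA_BUILD
#
# (the empty third arg falls back to the default out_dir), while the
# two-machine release run drives it from benchmarks/auto.sh.
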
measure-cachegrind() {
  local provenance=$1
  local host_job_id=$2
  local out_dir=${3:-$BASE_DIR/raw}
  local osh_cpp=${4:-$OSH_CPP_BENCHMARK_DATA}

  local cachegrind_tsv="$out_dir/$host_job_id.cachegrind.tsv"
  local lines_out="$out_dir/$host_job_id.lines.tsv"

  mkdir -p $BASE_DIR/{tmp,raw,stage1} $out_dir

  write-sorted-manifest '' $lines_out $'\t'  # TSV

  # TODO: This header is fragile. Every task should print its own file with a
  # header, and then we can run them in parallel, and join them with
  # devtools/csv_concat.py

  benchmarks/time_.py --tsv --print-header \
    --rusage \
    --field shell_name --field shell_hash \
    --field path \
    --field cachegrind_out_path \
    > $cachegrind_tsv

  local ctasks=$BASE_DIR/cachegrind-tasks.txt

  # zsh weirdly forks during zsh -n, which complicates our cachegrind
  # measurement. So just ignore it. (This can be seen with
  # strace -e fork -f -- zsh -n $file)
  print-tasks $provenance bash dash mksh $osh_cpp > $ctasks

  cat $ctasks | xargs -n $NUM_TASK_COLS -- $0 cachegrind-task $out_dir
}

#
# Data Preparation and Analysis
#

stage1-cachegrind() {
  local raw_dir=$1
  local single_machine=$2
  local out_dir=$3
  local raw_data_csv=$4

  local maybe_host
  if test -n "$single_machine"; then
    # CI: _tmp/osh-parser/raw.no-host.$job_id
    maybe_host='no-host'
  else
    # release: ../benchmark-data/osh-parser/raw.lenny.$job_id
    #maybe_host=$(hostname)
    maybe_host=$MACHINE1  # lenny
  fi

  # Only runs on one machine
  local -a sorted=( $raw_dir/$maybe_host.*.cachegrind.tsv )
  local tsv_in=${sorted[-1]}  # latest one

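  # For illustration: the regex below pulls the total instruction count out of
  # each cachegrind log, which contains a summary line roughly like this
  # (hypothetical count):
  #
  #   I refs:      1,462,086,001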
  devtools/tsv_column_from_files.py \
    --new-column irefs \
    --path-column cachegrind_out_path \
    --extract-group-1 'I[ ]*refs:[ ]*([\d,]+)' \
    --remove-commas \
    $tsv_in > $out_dir/cachegrind.tsv

  echo $tsv_in >> $raw_data_csv
}

stage1() {
  local raw_dir=${1:-$BASE_DIR/raw}
  local single_machine=${2:-}

  local out=$BASE_DIR/stage1
  mkdir -p $out

  # Construct a one-column CSV file
  local raw_data_csv=$out/raw-data.csv
  echo 'path' > $raw_data_csv

  stage1-cachegrind $raw_dir "$single_machine" $out $raw_data_csv

  local lines_csv=$out/lines.csv

  local -a raw=()
  if test -n "$single_machine"; then
    local -a a=($raw_dir/$single_machine.*.times.csv)
    raw+=( ${a[-1]} )
    echo ${a[-1]} >> $raw_data_csv

    # They are the same, output one of them.
    cat $raw_dir/$single_machine.*.lines.csv > $lines_csv
  else
    # Globs are in lexicographical order, which works for our dates.
    local -a a=($raw_dir/$MACHINE1.*.times.csv)
    local -a b=($raw_dir/$MACHINE2.*.times.csv)

    raw+=( ${a[-1]} ${b[-1]} )
    {
      echo ${a[-1]}
      echo ${b[-1]}
    } >> $raw_data_csv

    # Verify that the files are equal, and pass one of them.
    local -a c=($raw_dir/$MACHINE1.*.lines.csv)
    local -a d=($raw_dir/$MACHINE2.*.lines.csv)

    local left=${c[-1]}
    local right=${d[-1]}

    if ! diff $left $right; then
      die "Benchmarks were run on different files ($left != $right)"
    fi

    # They are the same, output one of them.
    cat $left > $lines_csv
  fi

  local times_csv=$out/times.csv
  csv-concat "${raw[@]}" > $times_csv

  head $out/*
  wc -l $out/*
}

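# After stage1, $BASE_DIR/stage1 holds raw-data.csv, lines.csv, times.csv, and
# cachegrind.tsv; soil-run then runs benchmarks/report.sh stage2 on $BASE_DIR.
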
# TODO:
# - maybe rowspan for hosts: flanders/lenny
#   - does that interfere with sorting?
#
# NOTE: not bothering to make it sortable now. Just using the CSS.

print-report() {
  local in_dir=$1

  benchmark-html-head 'OSH Parser Performance'

  cat <<EOF
<body class="width60">
  <p id="home-link">
    <a href="/">oilshell.org</a>
  </p>
EOF

  cmark <<'EOF'
## OSH Parser Performance

We time `$sh -n $file` for various files under various shells, and then run
the same parses under cachegrind for stable metrics.

Source code: [oil/benchmarks/osh-parser.sh](https://github.com/oilshell/oil/tree/master/benchmarks/osh-parser.sh)

### Summary

#### Instructions Per Line (via cachegrind)

Lower numbers are generally better, but each shell recognizes a different
language, and OSH uses a more thorough parsing algorithm. In **thousands** of
"I refs".

EOF
  tsv2html $in_dir/cachegrind_summary.tsv

  cmark <<'EOF'

(zsh isn't measured because `zsh -n` unexpectedly forks.)

#### Average Parsing Rate, Measured on Two Machines (lines/ms)

Shell startup time is included in the elapsed time measurements, but long files
are chosen to minimize its effect.
EOF
  csv2html $in_dir/summary.csv

  cmark <<< '### Per-File Measurements'
  echo

  # Flat tables for CI
  if test -f $in_dir/times_flat.tsv; then
    cmark <<< '#### Time and Memory'
    echo

    tsv2html $in_dir/times_flat.tsv
  fi
  if test -f $in_dir/cachegrind_flat.tsv; then
    cmark <<< '#### Instruction Counts'
    echo

    tsv2html $in_dir/cachegrind_flat.tsv
  fi

  # Breakdowns for release
  if test -f $in_dir/instructions.tsv; then
    cmark <<< '#### Instructions Per Line (in thousands)'
    echo
    tsv2html $in_dir/instructions.tsv
  fi

  if test -f $in_dir/elapsed.csv; then
    cmark <<< '#### Elapsed Time (milliseconds)'
    echo
    csv2html $in_dir/elapsed.csv
  fi

  if test -f $in_dir/rate.csv; then
    cmark <<< '#### Parsing Rate (lines/ms)'
    echo
    csv2html $in_dir/rate.csv
  fi

  if test -f $in_dir/max_rss.csv; then
    cmark <<'EOF'
### Memory Usage (Max Resident Set Size in MB)

Again, OSH uses a **different algorithm** (and language) than POSIX shells. It
builds an AST in memory rather than just validating the code line-by-line.

EOF
    csv2html $in_dir/max_rss.csv
  fi

  cmark <<EOF
### Shell and Host Details
EOF
  csv2html $in_dir/shells.csv
  csv2html $in_dir/hosts.csv

  cmark <<EOF
### Raw Data
EOF
  csv2html $in_dir/raw-data.csv

  # Only show the files.html link on a single machine
  if test -f $(dirname $in_dir)/files.html; then
    cmark << 'EOF'
---
[raw files](files.html)

EOF
  fi

  cat <<EOF
</body>
</html>
EOF
}

soil-run() {
  ### Run it on just this machine, and make a report

  rm -r -f $BASE_DIR
  mkdir -p $BASE_DIR

  local -a osh_bin=( $OSH_CPP_NINJA_BUILD )
  ninja "${osh_bin[@]}"

  local single_machine='no-host'

  local job_id
  job_id=$(benchmarks/id.sh print-job-id)

  benchmarks/id.sh shell-provenance-2 \
    $single_machine $job_id _tmp \
    bash dash bin/osh "${osh_bin[@]}"

  # TODO: measure* should use print-tasks | run-tasks
  local provenance=_tmp/provenance.txt
  local host_job_id="$single_machine.$job_id"

  measure $provenance $host_job_id '' $OSH_CPP_NINJA_BUILD

  measure-cachegrind $provenance $host_job_id '' $OSH_CPP_NINJA_BUILD

  # TODO: R can use this TSV file
  cp -v _tmp/provenance.tsv $BASE_DIR/stage1/provenance.tsv

  # Trivial concatenation for 1 machine
  stage1 '' $single_machine

  benchmarks/report.sh stage2 $BASE_DIR

  # Make _tmp/osh-parser/files.html, so index.html can potentially link to it
  find-dir-html _tmp/osh-parser files

  benchmarks/report.sh stage3 $BASE_DIR
}

"$@"