#!/usr/bin/env bash
#
# Run tests against multiple shells with the sh_spec framework.
#
# Usage:
#   test/spec-runner.sh <function name>
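#
# Example (illustrative):
#   test/spec-runner.sh all-parallel osh compare-py osh-py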

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)

source build/dev-shell.sh
source test/common.sh
source test/spec-common.sh
source test/tsv-lib.sh  # $TAB

NUM_SPEC_TASKS=${NUM_SPEC_TASKS:-400}

# Option to use our xargs implementation.
#xargs() {
#  echo "Using ~/git/oilshell/xargs.py/xargs.py"
#  ~/git/oilshell/xargs.py/xargs.py "$@"
#}

#
# Test Runner
#

write-suite-manifests() {
  #test/sh_spec.py --print-table spec/*.test.sh
  { test/sh_spec.py --print-table spec/*.test.sh | while read suite name; do
      case $suite in
        osh)      echo $name >& $osh ;;
        ysh)      echo $name >& $ysh ;;
        tea)      echo $name >& $tea ;;
        disabled) ;;  # ignore
        *)        die "Invalid suite $suite" ;;
      esac
    done
  } {osh}>_tmp/spec/SUITE-osh.txt \
    {ysh}>_tmp/spec/SUITE-ysh.txt \
    {tea}>_tmp/spec/SUITE-tea.txt \
    {needs_terminal}>_tmp/spec/SUITE-needs-terminal.txt

  # These are kind of pseudo-suites, not the main 3
  test/sh_spec.py --print-tagged interactive \
    spec/*.test.sh > _tmp/spec/SUITE-interactive.txt

  test/sh_spec.py --print-tagged dev-minimal \
    spec/*.test.sh > _tmp/spec/SUITE-osh-minimal.txt
}
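
# Note on the {varname}> redirections above: bash allocates a free file
# descriptor, opens the file on it, and stores the fd number in the named
# variable, so 'echo $name >& $osh' writes to that file.  A standalone sketch
# of the same pattern (illustrative; /tmp/fd-demo.txt is just an example path):
#
#   { echo hi >& $fd; } {fd}>/tmp/fd-demo.txt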


diff-manifest() {
  ### temporary test

  write-suite-manifests
  #return

  # crazy sorting, affects glob
  # doesn't work
  #LANG=C
  #LC_COLLATE=C
  #LC_ALL=C
  #export LANG LC_COLLATE LC_ALL

  for suite in osh ysh tea interactive osh-minimal; do
    echo
    echo [$suite]
    echo

    diff -u -r <(sort spec2/SUITE-$suite.txt) <(sort _tmp/spec/SUITE-$suite.txt)  #|| true
  done
}

dispatch-one() {
  # Determines what binaries to compare against:
  # compare-py | compare-cpp | osh-only | bash-only | release-alpine
  local compare_mode=${1:-compare-py}
  # Which subdir of _tmp/spec: osh-py ysh-py osh-cpp ysh-cpp smoosh tea
  local spec_subdir=${2:-osh-py}
  local spec_name=$3
  shift 3  # rest are more flags

  log "__ $spec_name"

  local -a prefix
  case $compare_mode in

    compare-py)  prefix=(test/spec.sh) ;;

    compare-cpp) prefix=(test/spec-cpp.sh run-file) ;;

    # For interactive comparison
    osh-only)  prefix=(test/spec-util.sh run-file-with-osh) ;;
    bash-only) prefix=(test/spec-util.sh run-file-with-bash) ;;

    release-alpine) prefix=(test/spec-alpine.sh run-file) ;;

    *) die "Invalid compare mode $compare_mode" ;;
  esac

  local base_dir=_tmp/spec/$spec_subdir

  # TODO: Could --stats-{file,template} be a separate awk step on .tsv files?
  run-task-with-status \
    $base_dir/${spec_name}.task.txt \
    "${prefix[@]}" $spec_name \
      --format html \
      --stats-file $base_dir/${spec_name}.stats.txt \
      --stats-template \
      '%(num_cases)d %(oils_num_passed)d %(oils_num_failed)d %(oils_failures_allowed)d %(oils_ALT_delta)d' \
      "$@" \
    > $base_dir/${spec_name}.html
}
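
# The --stats-template above makes each ${spec_name}.stats.txt a single line
# of five integers, e.g. "24 22 2 2 0" (example values); _html-summary reads
# them back with awk getline.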


_html-summary() {
  ### Print an HTML summary to stdout and return whether all tests succeeded

  local sh_label=$1  # osh or ysh
  local base_dir=$2  # e.g. _tmp/spec/ysh-cpp
  local totals=$3  # path where the totals <tr> row (HTML) is written
  local manifest=$4

  html-head --title "Spec Test Summary" \
    ../../../web/base.css ../../../web/spec-tests.css

  cat <<EOF
<body class="width50">

<p id="home-link">
<!-- The release index is two dirs up -->
<a href="../..">Up</a> |
<a href="/">oilshell.org</a>
</p>

<h1>Spec Test Results Summary</h1>

<table>
<thead>
<tr>
<td>name</td>
<td># cases</td> <td>$sh_label # passed</td> <td>$sh_label # failed</td>
<td>$sh_label failures allowed</td>
<td>$sh_label ALT delta</td>
<td>Elapsed Seconds</td>
</tr>
</thead>
<!-- TOTALS -->
EOF

  # Awk notes:
  # - "getline" is kind of like bash "read", but it doesn't allow you to
  #   specify variable names. You have to destructure it yourself.
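  #   (For example, 'n = getline < path' reads a line into $0 and splits it
  #   into $1, $2, ...; n is 1 on success.)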
  # - Lack of string interpolation is very annoying

  head -n $NUM_SPEC_TASKS $manifest | sort | awk -v totals=$totals -v base_dir=$base_dir '
  # Awk problem: getline errors are ignored by default!
  function error(path) {
    print "Error reading line from file: " path > "/dev/stderr"
    exit(1)
  }

  {
    spec_name = $0

    # Read from the task files
    path = ( base_dir "/" spec_name ".task.txt" )
    n = getline < path
    if (n != 1) {
      error(path)
    }
    status = $1
    wall_secs = $2

    path = ( base_dir "/" spec_name ".stats.txt" )
    n = getline < path
    if (n != 1) {
      error(path)
    }
    num_cases = $1
    oils_num_passed = $2
    oils_num_failed = $3
    oils_failures_allowed = $4
    oils_ALT_delta = $5

    sum_status += status
    sum_wall_secs += wall_secs
    sum_num_cases += num_cases
    sum_oils_num_passed += oils_num_passed
    sum_oils_num_failed += oils_num_failed
    sum_oils_failures_allowed += oils_failures_allowed
    sum_oils_ALT_delta += oils_ALT_delta
    num_rows += 1

    # For the console
    if (status == 0) {
      num_passed += 1
    } else {
      num_failed += 1
      print spec_name " failed with status " status > "/dev/stderr"
    }

    if (status != 0) {
      css_class = "failed"
    } else if (oils_num_failed != 0) {
      css_class = "osh-allow-fail"
    } else if (oils_num_passed != 0) {
      css_class = "osh-pass"
    } else {
      css_class = ""
    }
    print "<tr class=" css_class ">"
    print "<td><a href=" spec_name ".html>" spec_name "</a></td>"
    print "<td>" num_cases "</td>"
    print "<td>" oils_num_passed "</td>"
    print "<td>" oils_num_failed "</td>"
    print "<td>" oils_failures_allowed "</td>"
    print "<td>" oils_ALT_delta "</td>"
    printf("<td>%.2f</td>\n", wall_secs);
    print "</tr>"
  }

  END {
    print "<tr class=totals>" >totals
    print "<td>TOTAL (" num_rows " rows) </td>" >totals
    print "<td>" sum_num_cases "</td>" >totals
    print "<td>" sum_oils_num_passed "</td>" >totals
    print "<td>" sum_oils_num_failed "</td>" >totals
    print "<td>" sum_oils_failures_allowed "</td>" >totals
    print "<td>" sum_oils_ALT_delta "</td>" >totals
    printf("<td>%.2f</td>\n", sum_wall_secs) > totals
    print "</tr>" >totals

    print "<tfoot>"
    print "<!-- TOTALS -->"
    print "</tfoot>"

    # For the console
    print "" > "/dev/stderr"
    if (num_failed == 0) {
      print "*** All " num_passed " tests PASSED" > "/dev/stderr"
    } else {
      print "*** " num_failed " tests FAILED" > "/dev/stderr"
      exit(1)  # failure
    }
  }
  '
  all_passed=$?

  cat <<EOF
</table>

<h3>Version Information</h3>
<pre>
EOF

  # TODO: can pass shells here, e.g. for test/spec-cpp.sh
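  # Note: $suite is not a parameter of this function; under bash dynamic
  # scoping it refers to the caller html-summary's local.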
  test/spec-version.sh ${suite}-version-text

  cat <<EOF
</pre>
</body>
</html>
EOF

  return $all_passed
}

html-summary() {
  local suite=$1
  local base_dir=$2

  local manifest="_tmp/spec/SUITE-$suite.txt"

  local totals=$base_dir/totals-$suite.html
  local tmp=$base_dir/tmp-$suite.html

  local out=$base_dir/index.html

  # TODO: Do we also need $base_dir/{osh,oil}-details-for-toil.json
  # osh failures, and all failures
  # When deploying, if they exist, then copy them outside?
  # I guess toil_web.py can use the zipfile module?
  # To get _tmp/spec/...
  # it can read JSON like:
  # { "task_tsv": "_tmp/toil/INDEX.tsv",
  #   "details_json": [ ... ],
  # }

  set +o errexit
  _html-summary $suite $base_dir $totals $manifest > $tmp
  all_passed=$?
  set -o errexit

  # Total rows are displayed at both the top and bottom.
  awk -v totals="$(cat $totals)" '
    /<!-- TOTALS -->/ {
      print totals
      next
    }
    { print }
  ' < $tmp > $out

  echo
  echo "Results: file://$PWD/$out"

  return $all_passed
}

_all-parallel() {
  local suite=${1:-osh}
  local compare_mode=${2:-compare-py}
  local spec_subdir=${3:-survey}

  # The rest are more flags
  shift 3

  local manifest="_tmp/spec/SUITE-$suite.txt"
  local output_base_dir="_tmp/spec/$spec_subdir"
  mkdir -p $output_base_dir

  write-suite-manifests

  # The exit codes are recorded in files for html-summary to aggregate.
  set +o errexit
  head -n $NUM_SPEC_TASKS $manifest \
    | xargs -I {} -P $MAX_PROCS -- \
      $0 dispatch-one $compare_mode $spec_subdir {} "$@"
  set -o errexit

  all-tests-to-html $manifest $output_base_dir

  # note: the HTML links to ../../web/, which is in the repo.
  html-summary $suite $output_base_dir  # returns whether all passed
}

all-parallel() {
  ### Run spec tests in parallel.

  # Note that this function doesn't fail because 'run-file' saves the status
  # to a file.

  time $0 _all-parallel "$@"
}

all-tests-to-html() {
  local manifest=$1
  local output_base_dir=$2
  # ignore attrs output
  head -n $NUM_SPEC_TASKS $manifest \
    | xargs --verbose -- doctools/src_tree.py spec-files $output_base_dir >/dev/null

  #| xargs -n 1 -P $MAX_PROCS -- $0 test-to-html $output_base_dir
  log "done: all-tests-to-html"
}

shell-sanity-check() {
  echo "PWD = $PWD"
  echo "PATH = $PATH"

  for sh in "$@"; do
    # note: shells are in $PATH, but not $OSH_LIST
    if ! $sh -c 'echo -n "hello from $0: "; command -v $0 || true'; then
      echo "ERROR: $sh failed sanity check"
      return 1
    fi
  done
}
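
# Each shell should print a line like "hello from bash: /usr/bin/bash"
# (example output; the exact path depends on $PATH).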

filename=$(basename $0)
if test "$filename" = 'spec-runner.sh'; then
  "$@"
fi