#!/usr/bin/env bash
#
# Run unit tests. Sets PYTHONPATH.
#
# Usage:
#   test/unit.sh <function name>
#
# Examples:
#
#   test/unit.sh unit frontend/lexer_test.py
#   test/unit.sh all
#   test/unit.sh minimal

set -o nounset
set -o pipefail
set -o errexit
shopt -s strict:all 2>/dev/null || true  # dogfood for OSH

REPO_ROOT=$(cd "$(dirname $0)/.."; pwd)  # tsv-lib.sh uses this
readonly REPO_ROOT

source build/dev-shell.sh    # R_LIBS_USER, but also changes python3
source test/common.sh        # html-head
source devtools/run-task.sh  # run-task
source test/tsv-lib.sh

# for 'import typing' in Python 2. Can't go in build/dev-shell.sh because it
# would affect Python 3.
export PYTHONPATH="vendor:$PYTHONPATH"
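
# A hypothetical sanity check, not called by any target here: verify that
# vendor/ is importable under Python 2. Assumes a python2 binary on PATH,
# which the Python 2 unit tests themselves also need.
check-vendor-typing() {
  python2 -c 'import typing; print(typing)'
}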

# For auto-complete
unit() {
  "$@"
}

delete-pyc() {
  find . -name '*.pyc' | xargs --no-run-if-empty -- rm || true
}

# WTF, fixes native_test issue
#export PYTHONDONTWRITEBYTECODE=1

banner() {
  echo -----
  echo "$@"
  echo -----
}

readonly -a PY2_UNIT_TESTS=( {asdl,asdl/examples,build,builtin,core,data_lang,doctools,frontend,lazylex,ysh,osh,pyext,pylib,soil,test,tools}/*_test.py )

readonly -a PY3_UNIT_TESTS=( mycpp/*_test.py spec/stateful/*_test.py )

py2-tests() {
  local minimal=${1:-}

  for t in "${PY2_UNIT_TESTS[@]}"; do
    # For Travis after build/py.sh minimal: if we didn't build fastlex.so,
    # then skip a unit test that will fail.

    if test -n "$minimal"; then
      if test $t = 'pyext/fastlex_test.py'; then
        continue
      fi
      # doctools/cmark.sh makes that shared library
      if test $t = 'doctools/cmark_test.py'; then
        continue
      fi
    fi

    echo $t
  done
}

py3-tests() {
  for t in "${PY3_UNIT_TESTS[@]}"; do
    echo $t
  done
}

all-tests() {
  py2-tests "$@"

  # TODO: This only PRINTS the tests; it doesn't actually run them, because
  # the Python 3 tests need a different PYTHONPATH.
  py3-tests
}
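
# A minimal sketch of running the Python 3 tests directly, per the TODO
# above. This helper is hypothetical and isn't called anywhere; the bare
# PYTHONPATH=. is an assumption about what these tests need.
py3-tests-sketch() {
  py3-tests | while read -r t; do
    PYTHONPATH=. python3 "$t" || echo "FAILED: $t"
  done
}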

run-unit-tests() {
  while read test_path; do
    # no separate working dir
    run-test-bin $test_path '' _test/py-unit
  done
}

all() {
  ### Run unit tests after build/py.sh all

  time all-tests "$@" | run-unit-tests
  echo
  echo "All unit tests passed."
}

minimal() {
  ### Run unit tests after build/py.sh minimal

  time py2-tests T | run-unit-tests
  echo
  echo "Minimal unit tests passed."
}

#
# Experimental tsv-stream
#

tsv-stream-one() {
  local rel_path=$1

  local log_file=_tmp/unit/$rel_path.txt
  mkdir -p "$(dirname $log_file)"

  echo
  echo "| ROW test=$rel_path test_HREF=$log_file"

  # TODO: Emit | ADD status=0 elapsed_secs=0.11

  # errexit is on, so capture failure with || rather than inspecting $?
  # afterward.
  local status=0
  time-tsv -o /dev/stdout -- $rel_path || status=$?

  if test $status -ne 0; then
    echo
    echo "*** $rel_path FAILED ***"
    echo
    return 255  # xargs aborts
  fi
}
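
# The framing above looks like this at the terminal (illustrative values,
# using a test name from the notes below):
#
#   | ROW test=osh/split_test.py test_HREF=_tmp/unit/osh/split_test.py.txt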

tsv-stream-all() {
  echo '| HEADER status elapsed_secs test test_HREF'

  time py2-tests T | head -n 20 | xargs -n 1 -- $0 tsv-stream-one
}

# Experimental idea: capture the output of tsv-stream-all, and turn it into
# two things:
#
# - A TSV file, which can be turned into HTML and summarized with counts
# - An HTML text string with <a name=""> anchors, which the table can link to
#
# At the terminal, the raw output is still readable without a filter,
# although we might want:
#
#   | OK
#   | FAIL
#
# instead of:
#
#   | ADD status=0
#   | ADD status=1
#
# Also, we currently send output to /dev/null at the terminal, and we save it
# when doing a release.
#
# We might also do something like C++ unit tests:
#
#   RUN osh/split_test.py &> _test/osh/split_test
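
# A minimal sketch of a consumer for this framing (hypothetical; nothing
# calls it): keep only the ROW lines, one test per line.
tsv-stream-rows() {
  grep '^| ROW ' | sed 's/^| ROW //'
}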


all-2() {
  ### New harness that uses tsv-stream

  # Use this at the command line, in the CI, and in the release.

  tsv-stream-all | devtools/tsv_stream.py
}

# NOTE: Show options like this:
#   python -m unittest discover -h

#
# For _release/VERSION
#

run-test-and-log() {
  local tasks_tsv=$1
  local rel_path=$2

  local log=_tmp/unit/$rel_path.txt
  mkdir -p "$(dirname $log)"

  time-tsv --append --out $tasks_tsv \
    --field $rel_path --field "$rel_path.txt" -- \
    $rel_path >$log 2>&1
}

run-all-and-log() {
  local out_dir=_tmp/unit
  mkdir -p $out_dir
  rm -r -f $out_dir/*

  local tasks_tsv=$out_dir/tasks.tsv

  local status=0

  # TODO: I need to write a schema too? Or change csv2html.py to support
  # HREF in NullSchema.

  tsv-row 'status' 'elapsed_secs' 'test' 'test_HREF' > $tasks_tsv
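
  # Each row of tasks.tsv then looks something like this (values
  # illustrative; the exact column order comes from time-tsv):
  #
  #   0 <TAB> 0.11 <TAB> osh/split_test.py <TAB> osh/split_test.py.txt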

  # There are no functions here, so disabling errexit is safe.
  # Note: In YSH, this could use shopt { }.
  set +o errexit
  time all-tests | xargs -n 1 -- $0 run-test-and-log $tasks_tsv
  status=$?
  set -o errexit

  if test $status -ne 0; then
    cat $tasks_tsv
    echo
    echo "*** Some tests failed. See $tasks_tsv ***"
    echo

    return $status
  fi

  #tree _tmp/unit
  echo
  echo "All unit tests passed."
}


# TODO: It would be nice to have the timestamps of the underlying CSV files,
# and the timestamp of running the report. This is useful for benchmarks too.

print-report() {
  local in_dir=${1:-_tmp/unit}
  local base_url='../../web'  # published at more_tests.wwz/unit/

  html-head --title 'Oils Unit Test Results' \
    "$base_url/table/table-sort.js" \
    "$base_url/table/table-sort.css" \
    "$base_url/base.css" \
    "$base_url/benchmarks.css"

  # NOTE: Using the benchmarks CSS for now.
  cat <<EOF
<body class="width40">
<p id="home-link">
  <a href="/">oilshell.org</a>
</p>
<h2>Unit Test Results</h2>

EOF

  tsv2html $in_dir/report.tsv

  cat <<EOF
</body>
</html>
EOF
}

# Presentation changes:
#
# - elapsed seconds -> milliseconds
# - Need a link to the log for the test name (done, but no schema)
# - schema for right-justifying numbers

write-report() {
  local out=_tmp/unit/index.html
  test/report.R unit _tmp/unit _tmp/unit
  print-report > $out
  echo "Wrote $out"
}

soil-run() {
  # TODO: Should run everything in the CI, but it depends on R, and
  # dev-minimal doesn't have it.
  #
  # Skips fastlex_test.py and cmark_test.py.
  minimal
}

# Called by scripts/release.sh.
run-for-release() {
  run-all-and-log
  write-report
}

run-task "$@"