"""
How do you test a test framework?
You can't use the test framework to test itself, because it may contain bugs!
Hence this script, which uses upytest to run tests and check the results are as
expected. The expected results are hard-coded in this script, and the actual
results are generated by running tests with upytest. The script then compares
the expected and actual results to ensure they match.
Finally, the script creates a div element to display the results in the page.
If tests fail, the script will raise an AssertionError, which will be
displayed with a red background. If the tests pass, the script will display a
message with a green background.
There are two sorts of expected results: the number of tests that pass, fail,
and are skipped, and the names of the tests that pass, fail, and are skipped.
Tests that pass end with "passes", tests that fail end with "fails", and tests
that are skipped end with "skipped".
This script will work with both MicroPython and Pyodide, just so we can ensure
the test framework works in both environments. The index.html file uses
MicroPython, the index2.html file uses Pyodide.
That's it! Now we can test a test framework with a meta-test framework. 🤯
"""
from pyscript.web import page, div, h2, p, b
import upytest

expected_results = {
    "result_all": {
        "passes": 14,
        "fails": 12,
        "skipped": 8,
    },
    "result_random": {
        "passes": 14,
        "fails": 12,
        "skipped": 8,
    },
    "result_module": {
        "passes": 13,
        "fails": 12,
        "skipped": 8,
    },
    "result_class": {
        "passes": 3,
        "fails": 3,
        "skipped": 2,
    },
    "result_specific": {
        "passes": 1,
        "fails": 0,
        "skipped": 0,
    },
}
actual_results = {}
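# Each run below stores its result in actual_results under a key that matches
# the corresponding entry in expected_results.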
# Run all tests in the tests directory.
print("\033[1mRunning all tests in directory...\033[0m")
actual_results["result_all"] = await upytest.run("./tests")
# Run all tests in the tests directory in random order.
print("\n\n\033[1mRunning all tests in directory in random order...\033[0m")
actual_results["result_random"] = await upytest.run("./tests", random=True)
# Run all tests in a specific module.
print("\n\n\033[1mRunning all tests in a specific module...\033[0m")
actual_results["result_module"] = await upytest.run(
"tests/test_core_functionality.py"
)
# Run all tests in a specific test class.
print("\n\n\033[1mRunning all tests in a specific class...\033[0m")
actual_results["result_class"] = await upytest.run(
"tests/test_core_functionality.py::TestClass"
)
# Run a specific test function.
print("\n\n\033[1mRun a specific function...\033[0m")
actual_results["result_specific"] = await upytest.run(
"tests/test_core_functionality.py::test_passes"
)
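# As used below, each result returned by upytest.run behaves like a mapping
# with "passes", "fails" and "skipped" keys, each holding a list of test
# detail dicts that include a "test_name" entry.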
# Check that each run produced the expected number of passing, failing and
# skipped tests.
for name, result in expected_results.items():
    for key, value in result.items():
        actual = len(actual_results[name][key])
        assert (
            actual == value
        ), f"Expected {value} {key} in {name}, got {actual}"
# Ensure the tests that pass have a name ending in "passes", tests that fail
# have a name ending in "fails", and tests that are skipped have a name ending
# in "skipped".
for test_run, result in actual_results.items():  # result_all, result_module, etc.
    for test_status, matching_tests in result.items():  # passes, fails, skipped
        if test_status in ["passes", "fails", "skipped"]:
            for test in matching_tests:
                assert test["test_name"].endswith(
                    test_status
                ), f"Test {test['test_name']} does not end with {test_status}"
# Ensure the randomized test results are in a different order to the
# non-randomized test results.
for test_status in ["passes", "fails", "skipped"]:
    assert (
        actual_results["result_all"][test_status]
        != actual_results["result_random"][test_status]
    ), f"Randomized tests are the same as non-randomized tests for {test_status}"
# Ensure the results are JSON serializable.
import json
check = json.dumps(actual_results)
# Create a div to display the results in the page.
page.append(
    div(
        h2("Test Results All Correct ✨"),
        div(
            p(
                b("All Tests: "),
                f"Passes: {len(actual_results['result_all']['passes'])},"
                f" Fails: {len(actual_results['result_all']['fails'])},"
                f" Skipped: {len(actual_results['result_all']['skipped'])}.",
            ),
        ),
        div(
            p(
                b("Randomized Tests: "),
                f"Passes: {len(actual_results['result_random']['passes'])},"
                f" Fails: {len(actual_results['result_random']['fails'])},"
                f" Skipped: {len(actual_results['result_random']['skipped'])}.",
                " (Different order to the non-randomized 'All Tests').",
            ),
        ),
        div(
            p(
                b("Tests in a Specified Module: "),
                f"Passes: {len(actual_results['result_module']['passes'])},"
                f" Fails: {len(actual_results['result_module']['fails'])},"
                f" Skipped: {len(actual_results['result_module']['skipped'])}.",
            ),
        ),
        div(
            p(
                b("Tests in a Specified Test Class: "),
                f"Passes: {len(actual_results['result_class']['passes'])},"
                f" Fails: {len(actual_results['result_class']['fails'])},"
                f" Skipped: {len(actual_results['result_class']['skipped'])}.",
            ),
        ),
        div(
            p(
                b("Test a Specific Test: "),
                f"Passes: {len(actual_results['result_specific']['passes'])},"
                f" Fails: {len(actual_results['result_specific']['fails'])},"
                f" Skipped: {len(actual_results['result_specific']['skipped'])}.",
            ),
        ),
        style={
            "background-color": "lightgreen",
            "padding": "10px",
            "border": "1px solid green",
        },
    )
)