""" Grade Example Flow.
"""
import unittest
from grade import mixins, decorators, runners
from grade.pipeline import Pipeline, Run, AssertExitSuccess, AssertValgrindSuccess, PartialCredit
def student_function(x: list, m: int) -> list:
""" Random student function. """
return sum(x) % m


class Tests(mixins.ScoringMixin, unittest.TestCase):
    """ An example TestCase, with the ScoringMixin.

    Grade requires a TestCase as a starting point; from there,
    we can give each test function a weight, assign partial credit,
    and much more!
    """

    @classmethod
    def setUpClass(cls):
        """ Here, we can perform any long-duration prerequisite task.

        The calls in this block run only once, at the very beginning.
        A common use case for this block is compilation.
        """
        Run(["ls"])()

    def setUp(self) -> None:
        """ Within this block, we ensure we have everything we need to run the tests.

        A common use case here is to require that the source files exist,
        or the compiled binary from `setUpClass`.

        Warning: If anything in this block fails, the student will receive a 0.
        """
        self.require("example.py")

    def test_compile(self):
        """ Test working compilation.

        At this point, we know that the code compiled and all files were
        present, so we can just assertTrue(True)!
        """
        self.weight = 1  # Set the weight of the unit test (max score)
        self.assertTrue(True)

    # For most grading functions, we provide both methods and decorators.
    # There is no functional difference between the two.
    @decorators.weight(10)  # Equivalent to self.weight = 10
    def test_student_function(self):
        """ Test the student's Python code on some inputs. """
        self.assertEqual(student_function(list(range(10)), 5), sum(range(10)) % 5)
        # If making it this far into the test represents some key milestone,
        # you can assign the student some partial credit. If score is never
        # set, it defaults to the full value if the test succeeds, 0 otherwise.
        self.score = self.score + 5
        # As soon as you assign partial credit, score is locked to that
        # value. Remember to update it before the test exits!
        self.assertEqual(student_function([0], 1), 0)
        # Since the student made it to the end and we set a partial score,
        # we finish the test by setting their score to full credit.
        self.score = self.weight

    # Grade executable files.
    @decorators.weight(10)
    def test_executable(self):
        """ Checking executable files with a Pipeline.

        Pipelines are designed around testing executable files.
        They are composed of "layers" that pass CompletedProcess objects to
        one another. You can stack layers however you'd like, but they must
        start with a call to Run(), which generates the initial CompletedProcess.
        """
        Pipeline(Run(["ls"]), AssertExitSuccess(), AssertValgrindSuccess())()

    def test_executable_multiple_times(self):
        """ Checking multiple runs of an executable.

        We can also use pipelines in comprehensions and maps
        to test multiple targets at once. Say you wanted to check
        that a student's code returns zero for all valid test inputs;
        that would be a perfect use case for multiple targets.
        """
        self.weight = 10

        # noinspection PyShadowingNames
        def pipeline(testcase):
            """ Create a pipeline for the given testcase. """
            return Pipeline(Run(["echo", testcase]), AssertExitSuccess())

        # Now make an iterable (ex: list[str], glob, etc.)
        testcases = list("hello world")
        # You can execute them all at once (all-or-nothing credit):
        [pipeline(testcase)() for testcase in testcases]
        # Or, you can award partial credit for each testcase:
        results = PartialCredit(map(pipeline, testcases), 10)()
        self.score = results.score

    # Any output written to stdout or stderr is captured and included
    # in the test's output value.
    def test_failure(self):
        print("False is not True!")
        self.assertTrue(False)

    def test_collecting_runtimes(self):
        """ CompletedProcess objects all know their duration.

        Duration can be used to allocate points based on speed, or to feed
        a leaderboard!
        """
        # To set up a leaderboard, you have to at least set its name.
        self.leaderboardTitle = "Runtime"
        # (Desc)ending or (asc)ending order (desc by default).
        self.leaderboardOrder = "desc"
        self.weight = 10
        results = Pipeline(Run(["echo", "hello world"]), AssertExitSuccess())()
        # Here we set the leaderboard score to the run's duration.
        self.leaderboardScore = results.duration
        if results.duration < 1_000:
            self.score = 10
        elif results.duration < 10_000:
            self.score = 5


if __name__ == "__main__":
    # You can run things from inside a script as follows:
    suite = unittest.TestLoader().discover("./", pattern="example.py")
    results = runners.GradedRunner().run(suite)
    print(results.json)
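    # The JSON report could also be persisted, e.g. for upload to an
    # autograding platform (hypothetical output path, shown as a sketch):
    # with open("results.json", "w") as f:
    #     f.write(results.json)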
    # Or, you can use the command-line interface:
    # run `python -m grade -h` to get started with the CLI.