diff --git a/.coveragerc b/.coveragerc
index 2322f534..624c6501 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -11,9 +11,15 @@ exclude_lines =
# Don't complain if tests don't hit defensive assertion code:
raise NotImplementedError
+    # Ignore ellipsis statements
+ \.\.\.
+
omit =
# This file is just calls otter.cli.cli
otter/__main__.py
# This file is a copy of an external script and is not tested as part of Otter
otter/plugins/builtin/gmail_notifications/bin/gmail_oauth2.py
+
+ # ignore templates
+ otter/generate/templates/**/*
diff --git a/otter/test_files/abstract_test.py b/otter/test_files/abstract_test.py
index 687b7386..41f6adf4 100644
--- a/otter/test_files/abstract_test.py
+++ b/otter/test_files/abstract_test.py
@@ -98,7 +98,7 @@ def _repr_html_(self):
if not tcr.passed and tcr.test_case.failure_message is not None:
                ret += f"{tcr.test_case.name} message: {tcr.test_case.failure_message}"
            ret += f"{tcr.test_case.name} result:"
-            ret += f"{indent(tcr.message, '    ')}"
+            ret += f"{indent(tcr.message or '', '    ')}"
return ret
@@ -145,7 +145,7 @@ def resolve_test_file_points(
total_points = None
elif total_points is not None and not isinstance(total_points, (int, float)):
- raise TypeError(f"Test spec points has invalid type: {total_points}")
+ raise TypeError(f"Test spec points has invalid type: {type(total_points)}")
point_values = []
for test_case in test_cases:
@@ -162,7 +162,7 @@ def resolve_test_file_points(
pre_specified = sum(p for p in point_values if p is not None)
if total_points is not None:
if pre_specified > total_points:
- raise ValueError(f"More points specified in test cases than allowed for test")
+ raise ValueError("More points specified in test cases than allowed for test")
else:
try:
@@ -180,9 +180,6 @@ def resolve_test_file_points(
except ZeroDivisionError:
per_remaining = 0.0
- elif pre_specified == 0:
- per_remaining = 1 / len(point_values)
-
else:
# assume all other tests are worth 0 points
per_remaining = 0.0
@@ -269,12 +266,12 @@ def summary(self, public_only: bool = False) -> str:
"""
if (not public_only and self.passed_all) or (public_only and self.passed_all_public):
ret = f"{self.name} results: All test cases passed!"
- if (not public_only and self.passed_all) and any(
- tcr.test_case.success_message is not None for tcr in self.test_case_results
- ):
- for tcr in self.test_case_results:
- if tcr.test_case.success_message is not None:
- ret += f"\n{tcr.test_case.name} message: {tcr.test_case.success_message}"
+ all_tcrs = self.test_case_results
+ if public_only:
+ all_tcrs = [tcr for tcr in self.test_case_results if not tcr.test_case.hidden]
+ for tcr in all_tcrs:
+ if tcr.test_case.success_message is not None:
+ ret += f"\n{tcr.test_case.name} message: {tcr.test_case.success_message}"
return ret
tcrs = self.test_case_results
@@ -289,7 +286,7 @@ def summary(self, public_only: bool = False) -> str:
if not tcr.passed and tcr.test_case.failure_message is not None:
smry += f"{tcr.test_case.name} message: {tcr.test_case.failure_message}\n\n"
smry += f"{tcr.test_case.name} result:\n"
- smry += f"{indent(tcr.message.strip(), ' ')}\n\n"
+ smry += f"{indent((tcr.message or '').strip(), ' ')}\n\n"
tcr_summaries.append(smry.strip())
diff --git a/otter/test_files/exception_test.py b/otter/test_files/exception_test.py
index 590e778d..118c6b0e 100644
--- a/otter/test_files/exception_test.py
+++ b/otter/test_files/exception_test.py
@@ -52,6 +52,17 @@ def __init__(
self.failure_message = failure_message
self.test_func = lambda: None
+ def __eq__(self, other: Any) -> bool:
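+        """Compare test cases by their metadata; the wrapped test function is ignored."""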
+ if not isinstance(other, type(self)):
+ return False
+ return (
+ self.name == other.name
+ and self.points == other.points
+ and self.hidden == other.hidden
+ and self.success_message == other.success_message
+ and self.failure_message == other.failure_message
+ )
+
def __call__(self, test_func: Callable[..., None]) -> "test_case":
"""
Wrap a test case function as a decorator.
@@ -175,7 +186,7 @@ def run(self, global_environment: dict[str, Any]):
Arguments:
global_environment (``dict[str, Any]``): result of executing a Python notebook/script
"""
- test_case_results = []
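+        # store results on the instance, clearing out any results from a previous run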
+ self.test_case_results = []
for tc in self.test_cases:
test_case = tc.body
passed, message = True, "✅ Test case passed"
@@ -184,9 +195,9 @@ def run(self, global_environment: dict[str, Any]):
except Exception as e:
passed, message = False, "❌ Test case failed\n" + self._generate_error_message(e)
- test_case_results.append(TestCaseResult(test_case=tc, message=message, passed=passed))
-
- self.test_case_results = test_case_results
+ self.test_case_results.append(
+ TestCaseResult(test_case=tc, message=message, passed=passed)
+ )
@staticmethod
def _compile_string(s: str, path: str = "") -> CodeType:
diff --git a/otter/test_files/ok_test.py b/otter/test_files/ok_test.py
index 7b137df3..66695eef 100644
--- a/otter/test_files/ok_test.py
+++ b/otter/test_files/ok_test.py
@@ -66,6 +66,7 @@ def run(self, global_environment: dict[str, Any]):
Arguments:
``global_environment`` (``dict``): result of executing a Python notebook/script
"""
+ self.test_case_results = []
for i, test_case in enumerate(self.test_cases):
passed, result = run_doctest(
self.name + " " + str(i), test_case.body, global_environment
diff --git a/test/conftest.py b/test/conftest.py
index d1457d50..a9931058 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -9,6 +9,7 @@
from unittest import mock
from otter import __file__ as OTTER_PATH
+from otter.test_files import TestCase, TestCaseResult, TestFile
from .utils import TestFileManager
@@ -17,6 +18,11 @@
REPO_DIR = os.getcwd()
REAL_DOCKER_BUILD = docker.build
+# prevent pytest from collecting these classes as test classes
+TestCase.__test__ = False
+TestCaseResult.__test__ = False
+TestFile.__test__ = False
+
def pytest_addoption(parser):
"""
diff --git a/test/test_assign/test_integration.py b/test/test_assign/test_integration.py
index f9ebf6c5..0bed978b 100644
--- a/test/test_assign/test_integration.py
+++ b/test/test_assign/test_integration.py
@@ -19,10 +19,6 @@
from ..utils import assert_dirs_equal, TestFileManager, unzip_to_temp
-# prevent pytest from thinking TestCase is a testing class
-TestCase.__test__ = False
-
-
FILE_MANAGER = TestFileManager(__file__)
diff --git a/test/test_test_files/test_abstract_test.py b/test/test_test_files/test_abstract_test.py
new file mode 100644
index 00000000..698e4c70
--- /dev/null
+++ b/test/test_test_files/test_abstract_test.py
@@ -0,0 +1,717 @@
+"""Tests for ``otter.test_files.abstract_test``"""
+
+import pytest
+import random
+
+from dataclasses import asdict
+from unittest import mock
+
+from otter.test_files.abstract_test import TestCase, TestCaseResult, TestFile
+
+
+class MockTestFile(TestFile):
+ """A ``TestFile`` for testing the ABC's methods."""
+
+ _test_cases: list[TestCase]
+
+ @classmethod
+ def from_file(cls, path):
+ return cls(path, path, cls._test_cases)
+
+ @classmethod
+ def from_metadata(cls, s, path):
+ return cls(path, path, cls._test_cases)
+
+ def run(self, global_environment):
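+        # the mock treats the "global environment" as a mapping from test case name to a passed flag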
+ tcrs: list[TestCaseResult] = []
+ for i, tc in enumerate(self.test_cases):
+ passed = global_environment[tc.name]
+ tcrs.append(TestCaseResult(tc, None if passed else ":(", passed))
+ self.test_case_results = tcrs
+
+
+@pytest.fixture(autouse=True)
+def reset_mock_test_file():
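+    """Reset ``MockTestFile._test_cases`` to a known set of test cases before each test."""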
+ MockTestFile._test_cases = [
+ TestCase("q1", "q1 body", False, 1, None, None),
+ TestCase("q1H", "q1 body", True, 1, None, None),
+ TestCase("q2", "q2 body", False, 1, "q2 success", None),
+ TestCase("q2H", "q2 body", True, 2, "q2H success", None),
+ TestCase("q3", "q3 body", False, 0, None, "q3 failure"),
+ TestCase("q3H", "q3 body", True, 1, None, "q3H failure"),
+ ]
+
+
+def make_test_case(**kwargs):
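+    """Build a ``TestCase`` with default field values, overridable via keyword arguments."""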
+ return TestCase(
+ **{
+ "name": "q1",
+ "body": "q1 body",
+ "points": 1,
+ "hidden": False,
+ "success_message": None,
+ "failure_message": None,
+ **kwargs,
+ },
+ )
+
+
+def test_repr():
+ """Tests ``TestFile.__repr__``"""
+ tf = MockTestFile.from_file("foo")
+ with mock.patch.object(tf, "summary", return_value="foobar") as mocked_summary:
+ repr(tf)
+ mocked_summary.assert_called_once_with()
+
+
+@pytest.mark.parametrize(
+ "test_cases_override, test_case_results, want",
+ [
+ (
+ [
+ TestCase("q1", "q1 body", False, 1, None, None),
+ TestCase("q1H", "q1 body", True, 1, None, None),
+ ],
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+            "foo passed! 🎉",
+ ),
+ (
+ None,
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ (
+                "foo passed! 🎉"
+                "q2 message: q2 success"
+                "q2H message: q2H success"
+ ),
+ ),
+ (
+ None,
+ {
+ "q1": False,
+ "q1H": True,
+ "q2": True,
+ "q2H": False,
+ "q3": False,
+ "q3H": False,
+ },
+ (
+                "foo results:"
+                "q1 result:"
+                "    :("
+                "q1H result:"
+                ""
+                "q2 message: q2 success"
+                "q2 result:"
+                ""
+                "q2H result:"
+                "    :("
+                "q3 message: q3 failure"
+                "q3 result:"
+                "    :("
+                "q3H message: q3H failure"
+                "q3H result:"
+                "    :("
+ ),
+ ),
+ ],
+)
+def test_repr_html(test_cases_override, test_case_results, want):
+ """Tests ``TestFile._repr_html_``"""
+ if test_cases_override is not None:
+ MockTestFile._test_cases = test_cases_override
+ random.seed(42)
+ tf = MockTestFile.from_file("foo")
+ tf.run(test_case_results)
+ assert tf._repr_html_() == want
+
+
+@pytest.mark.parametrize(
+ "total_points, test_cases, want",
+ [
+ (
+ None,
+ [
+ make_test_case(points=None),
+ make_test_case(points=None),
+ make_test_case(points=None),
+ ],
+ [
+ make_test_case(points=1 / 3),
+ make_test_case(points=1 / 3),
+ make_test_case(points=1 / 3),
+ ],
+ ),
+ (
+ None,
+ [
+ make_test_case(points=1),
+ make_test_case(points=1),
+ make_test_case(points=1),
+ ],
+ [
+ make_test_case(points=1),
+ make_test_case(points=1),
+ make_test_case(points=1),
+ ],
+ ),
+ (
+ 2,
+ [
+ make_test_case(points=None),
+ make_test_case(points=None),
+ make_test_case(points=None),
+ ],
+ [
+ make_test_case(points=2 / 3),
+ make_test_case(points=2 / 3),
+ make_test_case(points=2 / 3),
+ ],
+ ),
+ (
+ 2,
+ [
+ make_test_case(points=1),
+ make_test_case(points=0.5),
+ make_test_case(points=0.5),
+ ],
+ [
+ make_test_case(points=1),
+ make_test_case(points=0.5),
+ make_test_case(points=0.5),
+ ],
+ ),
+ (
+ 4,
+ [
+ make_test_case(points=1),
+ make_test_case(points=None),
+ make_test_case(points=None),
+ ],
+ [
+ make_test_case(points=1),
+ make_test_case(points=1.5),
+ make_test_case(points=1.5),
+ ],
+ ),
+ (
+ 4,
+ [
+ make_test_case(points=1),
+ make_test_case(points=3),
+ make_test_case(points=None),
+ ],
+ [
+ make_test_case(points=1),
+ make_test_case(points=3),
+ make_test_case(points=0),
+ ],
+ ),
+ (
+ None,
+ [
+ make_test_case(points=0),
+ make_test_case(points=0),
+ make_test_case(points=0),
+ ],
+ [
+ make_test_case(points=0),
+ make_test_case(points=0),
+ make_test_case(points=0),
+ ],
+ ),
+ (
+ None,
+ [
+ make_test_case(points=0),
+ make_test_case(points=None),
+ make_test_case(points=None),
+ ],
+ [
+ make_test_case(points=0),
+ make_test_case(points=0.5),
+ make_test_case(points=0.5),
+ ],
+ ),
+ (
+ [1, 2, 3],
+ [
+ make_test_case(points=None),
+ make_test_case(points=None),
+ make_test_case(points=None),
+ ],
+ [
+ make_test_case(points=1),
+ make_test_case(points=2),
+ make_test_case(points=3),
+ ],
+ ),
+ (
+ [1, 2, 3],
+ [
+ make_test_case(points=0.5),
+ make_test_case(points=0.5),
+ make_test_case(points=0.5),
+ ],
+ [
+ make_test_case(points=1),
+ make_test_case(points=2),
+ make_test_case(points=3),
+ ],
+ ),
+ ],
+)
+def test_resolve_test_file_points(total_points, test_cases, want):
+ """Tests ``TestFile.resolve_test_file_points``"""
+ assert TestFile.resolve_test_file_points(total_points, test_cases) == want
+
+
+def test_resolve_test_file_points_errors():
+ """Tests errors in ``TestFile.resolve_test_file_points``"""
+ with pytest.raises(
+ ValueError, match="Points specified in test has different length than number of test cases"
+ ):
+ TestFile.resolve_test_file_points([1, 2], [make_test_case()])
+
+ with pytest.raises(TypeError, match="Test spec points has invalid type: "):
+ TestFile.resolve_test_file_points("foo", [])
+
+ with pytest.raises(
+ ValueError, match="More points specified in test cases than allowed for test"
+ ):
+ TestFile.resolve_test_file_points(
+ 1, [make_test_case(points=2), make_test_case(points=None)]
+ )
+
+
+@pytest.mark.parametrize(
+ "test_case_results, want",
+ [
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": False,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": False,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ ),
+ ],
+)
+def test_passed_all(test_case_results, want):
+ """Tests ``TestFile.passed_all``"""
+ tf = MockTestFile.from_file("foo")
+ tf.run(test_case_results)
+ assert tf.passed_all == want
+
+
+@pytest.mark.parametrize(
+ "test_case_results, want",
+ [
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": False,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": False,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ ),
+ ],
+)
+def test_passed_all_public(test_case_results, want):
+ """Tests ``TestFile.passed_all_public``"""
+ tf = MockTestFile.from_file("foo")
+ tf.run(test_case_results)
+ assert tf.passed_all_public == want
+
+
+@pytest.mark.parametrize(
+ "test_cases, want",
+ [
+ (
+ [
+ make_test_case(hidden=True),
+ make_test_case(hidden=False),
+ ],
+ False,
+ ),
+ (
+ [
+ make_test_case(hidden=False),
+ make_test_case(hidden=False),
+ ],
+ True,
+ ),
+ ],
+)
+def test_all_public(test_cases, want):
+ """Tests ``TestFile.all_public``"""
+ MockTestFile._test_cases = test_cases
+ tf = MockTestFile.from_file("foo")
+ assert tf.all_public == want
+
+
+@pytest.mark.parametrize(
+ "test_case_results, all_or_nothing, want_grade, want_score, want_possible",
+ [
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ 1,
+ 6,
+ 6,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": False,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ 5 / 6,
+ 5,
+ 6,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ 1,
+ 6,
+ 6,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": False,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ 0,
+ 0,
+ 6,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": False,
+ "q3H": True,
+ },
+ False,
+ 1,
+ 6,
+ 6,
+ ),
+ (
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ # NOTE: q3 is worth 0 points, but all_or_nothing overrides this, causing the test
+ # file to get a score of 0
+ "q3": False,
+ "q3H": True,
+ },
+ True,
+ 0,
+ 0,
+ 6,
+ ),
+ ],
+)
+def test_grade_score_possible(
+ test_case_results, all_or_nothing, want_grade, want_score, want_possible
+):
+ """Tests ``TestFile.grade``, ``TestFile.score``, and ``TestFile.possible``"""
+ tf = MockTestFile.from_file("foo")
+ tf.all_or_nothing = all_or_nothing
+ tf.run(test_case_results)
+ assert tf.grade == want_grade
+ assert tf.score == want_score
+ assert tf.possible == want_possible
+
+
+def test_update_score():
+ """Tests ``TestFile.update_score``"""
+ tf = MockTestFile.from_file("foo")
+ tf.run(
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ }
+ )
+ assert tf.score == 6
+ tf.update_score(1)
+ assert tf.score == 1
+
+
+def test_to_dict():
+ """Tests ``TestFile.to_dict``"""
+ tf = MockTestFile.from_file("foo")
+ tf.run(
+ {
+ "q1": True,
+ "q1H": False,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ }
+ )
+ assert tf.to_dict() == {
+ "score": 0,
+ "possible": 6,
+ "name": "foo",
+ "path": "foo",
+ "test_cases": [asdict(tc) for tc in MockTestFile._test_cases],
+ "all_or_nothing": True,
+ "test_case_results": [
+ {
+ "test_case": asdict(tc),
+ "message": None if tc.name != "q1H" else ":(",
+ "passed": tc.name != "q1H",
+ }
+ for tc in MockTestFile._test_cases
+ ],
+ }
+
+
+@pytest.mark.parametrize(
+ "test_cases_override, test_case_results, public_only, want",
+ [
+ (
+ [
+ TestCase("q1", "q1 body", False, 1, None, None),
+ TestCase("q1H", "q1 body", True, 1, None, None),
+ ],
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ "foo results: All test cases passed!",
+ ),
+ (
+ None,
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ False,
+ (
+ "foo results: All test cases passed!\n"
+ "q2 message: q2 success\n"
+ "q2H message: q2H success"
+ ),
+ ),
+ (
+ None,
+ {
+ "q1": False,
+ "q1H": True,
+ "q2": True,
+ "q2H": False,
+ "q3": False,
+ "q3H": False,
+ },
+ False,
+ (
+ "foo results:\n"
+ " q1 result:\n"
+ " :(\n"
+ "\n"
+ " q1H result:\n"
+ "\n"
+ " q2 message: q2 success\n"
+ "\n"
+ " q2 result:\n"
+ "\n"
+ " q2H result:\n"
+ " :(\n"
+ "\n"
+ " q3 message: q3 failure\n"
+ "\n"
+ " q3 result:\n"
+ " :(\n"
+ "\n"
+ " q3H message: q3H failure\n"
+ "\n"
+ " q3H result:\n"
+ " :("
+ ),
+ ),
+ (
+ [
+ TestCase("q1", "q1 body", False, 1, None, None),
+ TestCase("q1H", "q1 body", True, 1, None, None),
+ ],
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ "foo results: All test cases passed!",
+ ),
+ (
+ None,
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": True,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ ("foo results: All test cases passed!\nq2 message: q2 success"),
+ ),
+ (
+ None,
+ {
+ "q1": True,
+ "q1H": True,
+ "q2": True,
+ "q2H": False,
+ "q3": True,
+ "q3H": True,
+ },
+ True,
+ ("foo results: All test cases passed!\nq2 message: q2 success"),
+ ),
+ (
+ None,
+ {
+ "q1": False,
+ "q1H": True,
+ "q2": True,
+ "q2H": False,
+ "q3": False,
+ "q3H": False,
+ },
+ True,
+ (
+ "foo results:\n"
+ " q1 result:\n"
+ " :(\n"
+ "\n"
+ " q2 message: q2 success\n"
+ "\n"
+ " q2 result:\n"
+ "\n"
+ " q3 message: q3 failure\n"
+ "\n"
+ " q3 result:\n"
+ " :("
+ ),
+ ),
+ ],
+)
+def test_summary(test_cases_override, test_case_results, public_only, want):
+ """Tests ``TestFile.summary``"""
+ if test_cases_override is not None:
+ MockTestFile._test_cases = test_cases_override
+ random.seed(42)
+ tf = MockTestFile.from_file("foo")
+ tf.run(test_case_results)
+ assert tf.summary(public_only=public_only) == want
diff --git a/test/test_test_files/test_exception_test.py b/test/test_test_files/test_exception_test.py
index 22370070..99f687e3 100644
--- a/test/test_test_files/test_exception_test.py
+++ b/test/test_test_files/test_exception_test.py
@@ -3,9 +3,11 @@
import pprint
import pytest
+from dataclasses import asdict
from textwrap import dedent
-from otter.test_files.exception_test import ExceptionTestFile
+from otter.test_files.abstract_test import TestCase, TestCaseResult
+from otter.test_files.exception_test import ExceptionTestFile, test_case
@pytest.fixture
@@ -29,10 +31,169 @@ def q1_2(x):
)
+@pytest.fixture
+def exception_test_contents_with_messages():
+ return dedent(
+ """\
+ from otter.test_files import test_case
+
+ OK_FORMAT = False
+
+ name = "q1"
+
+ @test_case(hidden=False, points=1, success_message="foo")
+ def q1_1(x):
+ assert x % 2 == 0
+
+ @test_case(hidden=True, points=2, failure_message="bar")
+ def q1_2(x):
+ assert x == 4
+ """
+ )
+
+
+def q1_1(x):
+ assert x % 2 == 0
+
+
+def q1_2(x):
+ assert x == 4
+
+
+@pytest.fixture
+def expected_test_cases():
+ return [
+ TestCase(
+ name="q1 - 1",
+ body=test_case(name="q1 - 1", hidden=False, points=1)(q1_1),
+ hidden=False,
+ points=1,
+ success_message=None,
+ failure_message=None,
+ ),
+ TestCase(
+ name="q1 - 2",
+            body=test_case(name="q1 - 2", hidden=True, points=2)(q1_2),
+ hidden=True,
+ points=2,
+ success_message=None,
+ failure_message=None,
+ ),
+ ]
+
+
+@pytest.fixture
+def expected_test_cases_with_messages():
+ return [
+ TestCase(
+ name="q1 - 1",
+ body=test_case(name="q1 - 1", hidden=False, points=1)(q1_1),
+ hidden=False,
+ points=1,
+ success_message="foo",
+ failure_message=None,
+ ),
+ TestCase(
+ name="q1 - 2",
+            body=test_case(name="q1 - 2", hidden=True, points=2)(q1_2),
+ hidden=True,
+ points=2,
+ success_message=None,
+ failure_message="bar",
+ ),
+ ]
+
+
+# This hack is necessary because the __eq__ generated by the dataclass decorator does not appear
+# to use the __eq__ defined on the body field (comparing the body field of two TestCases always
+# fails even though test_case defines __eq__), so the dataclasses are compared as dicts instead.
+def compare_dataclasses_as_dicts(l1, l2):
+    assert len(l1) == len(l2)
+    assert all(asdict(i1) == asdict(i2) for i1, i2 in zip(l1, l2))
+
+
+def test_from_file(
+ exception_test_contents,
+ exception_test_contents_with_messages,
+ expected_test_cases,
+ expected_test_cases_with_messages,
+ tmp_path,
+):
+ """Tests ``ExceptionTestFile.from_file``."""
+ fp = tmp_path / "foo.py"
+ fp.write_text(exception_test_contents)
+
+ tf = ExceptionTestFile.from_file(str(fp))
+ assert tf.name == "q1"
+ assert tf.path == str(fp)
+ assert tf.all_or_nothing == False
+ compare_dataclasses_as_dicts(tf.test_cases, expected_test_cases)
+
+ fp.write_text(exception_test_contents_with_messages)
+
+ tf = ExceptionTestFile.from_file(str(fp))
+ assert tf.name == "q1"
+ assert tf.path == str(fp)
+ assert tf.all_or_nothing == False
+ compare_dataclasses_as_dicts(tf.test_cases, expected_test_cases_with_messages)
+
+
+def test_from_metadata(
+ exception_test_contents,
+ exception_test_contents_with_messages,
+ expected_test_cases,
+ expected_test_cases_with_messages,
+):
+ """Tests ``ExceptionTestFile.from_metadata``."""
+ tf = ExceptionTestFile.from_metadata(exception_test_contents, "foo.ipynb")
+ assert tf.name == "q1"
+ assert tf.path == "foo.ipynb"
+ assert tf.all_or_nothing == False
+ compare_dataclasses_as_dicts(tf.test_cases, expected_test_cases)
+
+ tf = ExceptionTestFile.from_metadata(exception_test_contents_with_messages, "foo.ipynb")
+ assert tf.name == "q1"
+ assert tf.path == "foo.ipynb"
+ assert tf.all_or_nothing == False
+ compare_dataclasses_as_dicts(tf.test_cases, expected_test_cases_with_messages)
+
+
+def test_from_metadata_errors():
+ """Tests errors in ``ExceptionTestFile.from_metadata``."""
+ with pytest.raises(ValueError, match="Test file foo.ipynb does not define 'name'"):
+ ExceptionTestFile.from_metadata(
+ "from otter.test_files import test_case\n@test_case(hidden=False, points=1)\ndef q1_1(x):\n assert x % 2 == 0",
+ "foo.ipynb",
+ )
+
+
+def test_run(exception_test_contents, expected_test_cases):
+ """Tests ``ExceptionTestFile.run``."""
+ tf = ExceptionTestFile.from_metadata(exception_test_contents, "foo.ipynb")
+
+ tf.run({"x": 4})
+ compare_dataclasses_as_dicts(
+ tf.test_case_results,
+ [
+ TestCaseResult(expected_test_cases[0], "✅ Test case passed", True),
+ TestCaseResult(expected_test_cases[1], "✅ Test case passed", True),
+ ],
+ )
+
+ tf.run({"x": 6})
+ assert len(tf.test_case_results) == 2
+ compare_dataclasses_as_dicts(
+ [tf.test_case_results[0]],
+ [TestCaseResult(expected_test_cases[0], "✅ Test case passed", True)],
+ )
+ compare_dataclasses_as_dicts([tf.test_case_results[1].test_case], [expected_test_cases[1]])
+ assert tf.test_case_results[1].passed == False
+ assert tf.test_case_results[1].message.startswith("❌ Test case failed\n")
+ assert "assert x == 4" in tf.test_case_results[1].message
+ assert "AssertionError" in tf.test_case_results[1].message
+
+
def test_all_or_nothing(exception_test_contents, tmp_path):
- """
- Tests the ``all_or_nothing`` config of an OK test.
- """
+ """Tests the ``all_or_nothing`` config."""
exception_test_contents += "\nall_or_nothing = True"
test_file = tmp_path / "q1.py"
diff --git a/test/test_test_files/test_metadata_test.py b/test/test_test_files/test_metadata_test.py
new file mode 100644
index 00000000..e69de29b
diff --git a/test/test_test_files/test_ok_test.py b/test/test_test_files/test_ok_test.py
index fe436fd6..b511d307 100644
--- a/test/test_test_files/test_ok_test.py
+++ b/test/test_test_files/test_ok_test.py
@@ -3,6 +3,7 @@
import pprint
import pytest
+from otter.test_files.abstract_test import TestCase, TestCaseResult
from otter.test_files.ok_test import OKTestFile
@@ -29,10 +30,231 @@ def ok_test_spec():
}
+@pytest.fixture
+def ok_test_spec_with_messages():
+ return {
+ "name": "q1",
+ "suites": [
+ {
+ "cases": [
+ {
+ "code": ">>> assert x % 2 == 0",
+ "hidden": False,
+ "points": 1,
+ "success_message": "foo",
+ },
+ {
+ "code": ">>> assert x == 4",
+ "hidden": True,
+ "points": 2,
+ "failure_message": "bar",
+ },
+ ],
+ },
+ ],
+ }
+
+
+@pytest.fixture
+def expected_test_cases():
+ return [
+ TestCase(
+ name="q1 - 1",
+ body=">>> assert x % 2 == 0",
+ hidden=False,
+ points=1,
+ success_message=None,
+ failure_message=None,
+ ),
+ TestCase(
+ name="q1 - 2",
+ body=">>> assert x == 4",
+ hidden=True,
+ points=2,
+ success_message=None,
+ failure_message=None,
+ ),
+ ]
+
+
+@pytest.fixture
+def expected_test_cases_with_messages():
+ return [
+ TestCase(
+ name="q1 - 1",
+ body=">>> assert x % 2 == 0",
+ hidden=False,
+ points=1,
+ success_message="foo",
+ failure_message=None,
+ ),
+ TestCase(
+ name="q1 - 2",
+ body=">>> assert x == 4",
+ hidden=True,
+ points=2,
+ success_message=None,
+ failure_message="bar",
+ ),
+ ]
+
+
+def test_from_file(
+ ok_test_spec,
+ ok_test_spec_with_messages,
+ expected_test_cases,
+ expected_test_cases_with_messages,
+ tmp_path,
+):
+ """Tests ``OKTestFile.from_file``."""
+ fp = tmp_path / "foo.py"
+ fp.write_text(f"test = {pprint.pformat(ok_test_spec)}")
+
+ tf = OKTestFile.from_file(str(fp))
+ assert tf.name == "q1"
+ assert tf.path == str(fp)
+ assert tf.all_or_nothing == False
+ assert tf.test_cases == expected_test_cases
+
+ fp.write_text(f"test = {pprint.pformat(ok_test_spec_with_messages)}")
+
+ tf = OKTestFile.from_file(str(fp))
+ assert tf.name == "q1"
+ assert tf.path == str(fp)
+ assert tf.all_or_nothing == False
+ assert tf.test_cases == expected_test_cases_with_messages
+
+
+@pytest.mark.parametrize(
+ "spec, ctx",
+ [
+ (
+ {
+ "name": "q1",
+ "suites": [
+ {},
+ {},
+ ],
+ },
+ pytest.raises(AssertionError),
+ ),
+ (
+ {
+ "suites": [
+ {},
+ ],
+ },
+ pytest.raises(AssertionError),
+ ),
+ (
+ {
+ "name": "q1",
+ "suites": [
+ {
+ "cases": [
+ {
+ "code": ">>> assert x % 2 == 0",
+ "hidden": False,
+ "points": 1,
+ },
+ ],
+ "type": "a type",
+ },
+ ],
+ },
+ pytest.raises(AssertionError),
+ ),
+ (
+ {
+ "name": "q1",
+ "suites": [
+ {
+ "cases": [
+ {
+ "code": ">>> assert x % 2 == 0",
+ "hidden": False,
+ "points": 1,
+ },
+ ],
+ "setup": "some code",
+ },
+ ],
+ },
+ pytest.raises(AssertionError),
+ ),
+ (
+ {
+ "name": "q1",
+ "suites": [
+ {
+ "cases": [
+ {
+ "code": ">>> assert x % 2 == 0",
+ "hidden": False,
+ "points": 1,
+ },
+ ],
+ "teardown": "some code",
+ },
+ ],
+ },
+ pytest.raises(AssertionError),
+ ),
+ ],
+)
+def test_from_spec_errors(spec, ctx):
+ """Tests errors in ``OKTestFile.from_file``."""
+ with ctx:
+ OKTestFile.from_spec(spec)
+
+
+def test_from_metadata(
+ ok_test_spec,
+ ok_test_spec_with_messages,
+ expected_test_cases,
+ expected_test_cases_with_messages,
+):
+ """Tests ``OKTestFile.from_metadata``."""
+ tf = OKTestFile.from_metadata(ok_test_spec, "foo.ipynb")
+ assert tf.name == "q1"
+ assert tf.path == "foo.ipynb"
+ assert tf.all_or_nothing == False
+ assert tf.test_cases == expected_test_cases
+
+ tf = OKTestFile.from_metadata(ok_test_spec_with_messages, "foo.ipynb")
+ assert tf.name == "q1"
+ assert tf.path == "foo.ipynb"
+ assert tf.all_or_nothing == False
+ assert tf.test_cases == expected_test_cases_with_messages
+
+
+def test_run(ok_test_spec, expected_test_cases, tmp_path):
+ """Tests ``OKTestFile.run``."""
+ fp = tmp_path / "foo.py"
+ fp.write_text(f"test = {pprint.pformat(ok_test_spec)}")
+
+ tf = OKTestFile.from_file(str(fp))
+
+ tf.run({"x": 4})
+ assert tf.test_case_results == [
+ TestCaseResult(expected_test_cases[0], "✅ Test case passed", True),
+ TestCaseResult(expected_test_cases[1], "✅ Test case passed", True),
+ ]
+
+ tf.run({"x": 6})
+ assert len(tf.test_case_results) == 2
+ assert tf.test_case_results[0] == TestCaseResult(
+ expected_test_cases[0], "✅ Test case passed", True
+ )
+ assert tf.test_case_results[1].test_case == expected_test_cases[1]
+ assert tf.test_case_results[1].passed == False
+ assert tf.test_case_results[1].message.startswith("❌ Test case failed\n")
+ assert "assert x == 4" in tf.test_case_results[1].message
+ assert "AssertionError" in tf.test_case_results[1].message
+
+
def test_all_or_nothing(ok_test_spec, tmp_path):
- """
- Tests the ``all_or_nothing`` config of an OK test.
- """
+ """Tests the ``all_or_nothing`` config."""
ok_test_spec["all_or_nothing"] = True
test_file = tmp_path / f"{ok_test_spec['name']}.py"