(* YMMV)
[tox]
envlist = py3,lint
# ...
[testenv]
commands = pytest {posargs}
# ...
[testenv:lint]
commands = multilint {posargs}
List of environments
[tox]
envlist = py3,lint
# ...
Executed in that order when tox runs.
Shared configuration
[testenv]
commands = pytest {posargs}
# ...
Executed in all environments (unless overridden).
Specific environment configuration
[testenv:lint]
commands = multilint {posargs}
# ...
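Applies only to the lint environment; keys set here override the shared [testenv] values for that environment.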
# run just pytest suite
$ tox -e py3
# run just lint
$ tox -e lint
# recreate (all) tox environments
$ tox -r
# recreate just testing environment
$ tox -re py3
# run individual test module
$ pytest path/to/test_module.py
# run single test case
$ pytest path/to/test_module.py::test_function
# verbose assert comparison
$ pytest path/to/test_module.py -vv
pytest ./examples/test_verbose.py
    def test_foo():
        data = [
            {"a": "A", "b": "B"},
            {"c": "C", "d": "D"},
        ]
        expected = [
            {"c": "C", "d": "D"},
            {"e": "E", "f": "F"},
        ]
        assert data == expected
E       AssertionError: assert [{'a': 'A', '...C', 'd': 'D'}] == [{'c': 'C', '...E', 'f': 'F'}]
E         At index 0 diff: {'a': 'A', 'b': 'B'} != {'c': 'C', 'd': 'D'}
E         Use -v to get the full diff

examples/test_verbose.py:12: AssertionError
======================================== short test summary info ========================================
pytest ./examples/test_verbose.py -vv
_______________________________________________ test_foo ________________________________________________

    def test_foo():
        data = [
            {"a": "A", "b": "B"},
            {"c": "C", "d": "D"},
        ]
        expected = [
            {"c": "C", "d": "D"},
            {"e": "E", "f": "F"},
        ]
        assert data == expected
E       AssertionError: assert [{'a': 'A', 'b': 'B'}, {'c': 'C', 'd': 'D'}] == [{'c': 'C', 'd': 'D'}, {'e': 'E', 'f': 'F'}]
E         At index 0 diff: {'a': 'A', 'b': 'B'} != {'c': 'C', 'd': 'D'}
E         Full diff:
E         - [{'c': 'C', 'd': 'D'}, {'e': 'E', 'f': 'F'}]
E         + [{'a': 'A', 'b': 'B'}, {'c': 'C', 'd': 'D'}]

examples/test_verbose.py:12: AssertionError
======================================== short test summary info ========================================
FAILED examples/test_verbose.py::test_foo - AssertionError: assert [{'a': 'A', 'b': 'B'}, {'c': 'C', '...
def test_foo():
    print("hello")
========================================== test session starts ==========================================
platform linux -- Python 3.7.5, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
Using --randomly-seed=1587314423
rootdir: /home/julius/code/testing-slides
plugins: requests-mock-1.7.0, randomly-3.3.1
collecting ... collected 1 item

examples/test_output.py hello
.

=========================================== 1 passed in 0.01s ===========================================
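Note that pytest normally captures stdout and only shows it for failing tests; this run was presumably made with -s (--capture=no), which is why the print output appears inline.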
pytest ./examples/test_debugger.py -s --pdb
    def test_faillure():
        data = {"a": "A", "b": "B"}
        assert "c" in data
E       AssertionError: assert 'c' in {'a': 'A', 'b': 'B'}

examples/test_debugger.py:4: AssertionError
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> entering PDB >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> PDB post_mortem >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
> /home/julius/code/testing-slides/examples/test_debugger.py(4)test_faillure()
-> assert "c" in data
(Pdb) dict_keys(['a', 'b'])
(Pdb)
======================================== short test summary info ========================================
FAILED examples/test_debugger.py::test_faillure - AssertionError: assert 'c' in {'a': 'A', 'b': 'B'}
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! _pytest.outcomes.Exit: Quitting debugger !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
=========================================== 1 failed in 0.18s ===========================================
pytest ./examples/test_randomly.py
========================================== test session starts ==========================================
platform linux -- Python 3.7.5, pytest-5.4.1, py-1.8.1, pluggy-0.13.1
Using --randomly-seed=1587314424
rootdir: /home/julius/code/testing-slides
pytest ./examples/test_randomly.py --randomly-seed=1587....
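pytest-randomly reshuffles the test order on every run and prints the seed it used in the session header; re-running with the same --randomly-seed reproduces an order-dependent failure.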
# no coverage report
pytest --no-cov
# don't rebuild database
pytest --reuse-db
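(--no-cov is provided by the pytest-cov plugin and --reuse-db by pytest-django; these flags only exist when those plugins are installed.)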
Remember {posargs}?
[tox]
envlist = py3,lint
# ...
[testenv]
commands = pytest {posargs}
# ...
# run single test
$ tox -e py3 -- path/test_file.py::test_function_name
# run interactive debugger
$ tox -e py3 -- -s --pdb
# no test coverage
$ tox -e py3 -- --no-cov
# don’t rebuild database each time (more later)
$ tox -e py3 -- --reuse-db
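Everything after the bare -- is handed to pytest via {posargs}.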
Module-level constants are okay, but only if they are never modified and are guaranteed to be immutable, and a plain Python dictionary never is. Therefore, don't do this:
VAL = {"a": "A"}
def test_valb():
data = VAL
data ["b"] = "B"
assert data == {"a": "A", "b": "B"}
def test_valc():
data = VAL
data ["c"] = "C"
assert data == {"a": "A", "c": "C"}
_______________________________________________ test_valb _______________________________________________

    def test_valb():
        data = VAL
        data["b"] = "B"
        assert data == {"a": "A", "b": "B"}
E       AssertionError: assert {'a': 'A', 'b': 'B', 'c': 'C'} == {'a': 'A', 'b': 'B'}
E         Omitting 2 identical items, use -vv to show
E         Left contains 1 more item:
E         {'c': 'C'}
E         Use -v to get the full diff

examples/test_constants.py:8: AssertionError
@pytest.fixture
def val():
    return {"a": "A"}

def test_valb(val):
    val["b"] = "B"
    assert val == {"a": "A", "b": "B"}

def test_valc(val):
    val["c"] = "C"
    assert val == {"a": "A", "c": "C"}
If adjusting the fixture happens very often…
@pytest.fixture
def val():
    return {"a": "A"}

def test_valb(val):
    val["b"] = "B"
    val["c"] = "C"
    assert val == {"a": "A", "b": "B", "c": "C"}
… consider writing a builder fixture
@pytest.fixture
def build_val():
    def builder(**kwargs):
        base = {"a": "A"}
        return dict(base, **kwargs)
    return builder

def test_valb(build_val):
    val = build_val(b="B", c="C")
    assert val == {"a": "A", "b": "B", "c": "C"}
@pytest.fixture
def add_glue_response(aws_glue_stub, build_glue_job_run_response, task):
    def builder(job_run=None):
        job_run = job_run or build_glue_job_run_response()
        aws_glue_stub.add_response(
            "get_job_run",
            service_response={"JobRun": job_run},
            expected_params={"JobName": task.JOB_NAME, "RunId": "jr_testrun"},
        )
        return job_run
    return builder

def test_run__success(add_glue_response, task):
    add_glue_response()
    task.run()
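(aws_glue_stub and build_glue_job_run_response are fixtures defined elsewhere and not shown here; add_response is presumably the botocore Stubber API.)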
conftest.py: fixtures shared among different test modules.
Different naming conventions apply to tests than to regular code: be verbose and descriptive.
My current preference:
test_function_name__outcome__modifier
test_run__success__with_defaults
test_run__success__with_params
test_run__failure__missing_inputs
test_run__failure__aws_client_error
When working on a bigger piece of code and having to context switch, write a test describing what you want to develop next.
It'll help you get back up to speed.
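For example, a minimal sketch of such a marker test (the name and TODO are made up):
import pytest

# Placeholder left behind before a context switch; the test name already
# documents the behaviour to implement next.
@pytest.mark.skip(reason="TODO after context switch: reject empty payloads")
def test_process__failure__rejects_empty_payload():
    ...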
(*not AWS)
Everything from here onwards is still WIP.
Mocking the requests library (e.g. raising ConnectionError).
requests-mock library, simpler than responses.
Mocking gotchas: use autospec=True, mock.ANY.
Verbose output.
# save startup time
$ pytest --reuse-db
$ tox -e py3 -- --reuse-db
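A minimal sketch tying the mocking notes together, using unittest.mock rather than requests-mock; fetch and its URL are made up for illustration:
from unittest import mock

import pytest
import requests

def fetch(url):
    # toy function under test, invented for this sketch
    return requests.get(url, timeout=5)

def test_fetch__failure__connection_error():
    # autospec=True keeps the mock faithful to the real requests.get signature
    with mock.patch("requests.get", autospec=True) as get:
        get.side_effect = requests.exceptions.ConnectionError
        with pytest.raises(requests.exceptions.ConnectionError):
            fetch("https://example.com")
    # mock.ANY matches arguments we don't care about (the timeout here)
    get.assert_called_once_with("https://example.com", timeout=mock.ANY)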
Test the minimum set of details.
Questions and suggestions?