From 067e791c56d9615fee9b03d71f10be1a60715af2 Mon Sep 17 00:00:00 2001 From: Eric Hunsberger Date: Mon, 22 Jul 2019 17:34:53 -0400 Subject: [PATCH] Initial implementation of allclose fixture This initial implementation is a combination of prior implementations that were used in Nengo core and Nengo Loihi. This repository uses Nengo Bones to manage several files, notably those involved in continuous integration. Co-authored-by: Trevor Bekolay 15086 bytes docs/_templates/sidebar.html | 55 ++++++ docs/conf.py | 73 ++++++++ docs/index.rst | 9 + pyproject.toml | 7 + pytest_allclose/__init__.py | 13 ++ pytest_allclose/plugin.py | 229 +++++++++++++++++++++++++ pytest_allclose/tests/test_allclose.py | 131 ++++++++++++++ pytest_allclose/tests/test_pytest.py | 160 +++++++++++++++++ pytest_allclose/version.py | 16 ++ setup.cfg | 114 ++++++++++++ setup.py | 67 ++++++++ 24 files changed, 1447 insertions(+), 2 deletions(-) create mode 100644 .codecov.yml create mode 100644 .gitignore create mode 100644 .gitlint create mode 100644 .nengobones.yml create mode 100644 .pre-commit-config.yaml create mode 100644 .travis.yml create mode 100644 CHANGES.rst create mode 100644 CONTRIBUTING.rst create mode 100644 CONTRIBUTORS.rst create mode 100644 LICENSE.rst create mode 100644 MANIFEST.in create mode 100644 docs/_static/favicon.ico create mode 100644 docs/_templates/sidebar.html create mode 100644 docs/conf.py create mode 100644 docs/index.rst create mode 100644 pyproject.toml create mode 100644 pytest_allclose/__init__.py create mode 100644 pytest_allclose/plugin.py create mode 100644 pytest_allclose/tests/test_allclose.py create mode 100644 pytest_allclose/tests/test_pytest.py create mode 100644 pytest_allclose/version.py create mode 100644 setup.cfg create mode 100644 setup.py diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000..0d7f5b1 --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,19 @@ +# Automatically generated by nengo-bones, do not edit this file directly + +codecov: + ci: + - "!ci.appveyor.com" + notify: + require_ci_to_pass: no + +coverage: + status: + project: + default: + enabled: yes + target: auto + patch: + default: + enabled: yes + target: 100% + changes: no diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..24c1162 --- /dev/null +++ b/.gitignore @@ -0,0 +1,26 @@ +*.py[co] +.DS_Store +_build +build +dist +*.egg-info +*~ +*.bak +*.swp +log.txt +.ipynb_checkpoints/ +.cache +.tox +.vagrant +wintest.sh +Vagrantfile +*.class +*.eggs/ +.coverage +htmlcov +*.dist-info/ +.vscode +.idea +.pytest_cache/ + +.ci/*.sh diff --git a/.gitlint b/.gitlint new file mode 100644 index 0000000..054991b --- /dev/null +++ b/.gitlint @@ -0,0 +1,12 @@ +[general] +ignore=body-is-missing + +[title-max-length] +line-length=50 + +[B1] +# body line length +line-length=72 + +[title-match-regex] +regex=^[A-Z] diff --git a/.nengobones.yml b/.nengobones.yml new file mode 100644 index 0000000..e6e7008 --- /dev/null +++ b/.nengobones.yml @@ -0,0 +1,97 @@ +project_name: pytest-allclose +pkg_name: pytest_allclose +repo_name: nengo/pytest-allclose +description: Pytest fixture extending Numpy's allclose function + +copyright_start: 2019 + +license_rst: + type: mit + +contributing_rst: {} + +contributors_rst: {} + +manifest_in: {} + +setup_py: + license: MIT license + python_requires: ">=3.5" + install_req: + - numpy>=1.11 + - pytest + docs_req: + - nengo_sphinx_theme>=1.0 + - sphinx + tests_req: + - codespell + - coverage>=4.3 + - flake8 + - gitlint + - pylint + entry_points: + pytest11: + - 
"allclose = pytest_allclose.plugin" + classifiers: + - "Development Status :: 5 - Production/Stable" + - "Framework :: Pytest" + - "License :: OSI Approved :: MIT License" + - "Programming Language :: Python :: 3 :: Only" + - "Programming Language :: Python :: 3.5" + - "Programming Language :: Python :: 3.6" + - "Programming Language :: Python :: 3.7" + +setup_cfg: + pytest: + addopts: [] + allclose_tolerances: + - test_tolerances_big atol=0.01 rtol=0.2 + - "test_tolerances_small atol=0.001 rtol=0.005 # test comment" + - test_parametrized[True] atol=0.1 rtol=0.2 # parametrized with big tols + - test_parametrized[False] atol=0.001 rtol=0.002 # and with small tols + - test_precedence[1] atol=1 rtol=2 # correct, specific first + - test_precedence* atol=2 rtol=4 # general, should work for 2 + - test_precedence[3] atol=3 rtol=6 # incorrect, specific after general + - test_multiple_tolerances atol=0.001 rtol=0.004 + - test_multiple_tolerances atol=0.01 rtol=0.05 + - test_multiple_tolerances atol=0.002 rtol=0.005 + +docs_conf_py: + nengo_logo: general-small-light.svg + +travis_yml: + python: 3.6 + global_vars: + NUMPY: numpy + jobs: + - script: static + - script: test-coverage + - script: test + python: 3.5 + env: + NUMPY: numpy==1.11 + cache: false # disable the cache for one build to make sure that works + - script: test + python: 3.7 + dist: xenial # currently only xenial has python 3.7 + env: + NUMPY: numpy==1.16 + - script: docs + +ci_scripts: + - template: static + - template: test + pip_install: + - $NUMPY + - template: test + output_name: test-coverage + coverage: true + pip_install: + - $NUMPY + - template: docs + +codecov_yml: {} + +pre_commit_config_yaml: {} + +pyproject_toml: {} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..6698fe1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,7 @@ +# Automatically generated by nengo-bones, do not edit this file directly + +repos: +- repo: https://github.com/psf/black + rev: stable + hooks: + - id: black diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..edaba06 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,100 @@ +# Automatically generated by nengo-bones, do not edit this file directly + +language: python +python: 3.6 +notifications: + email: + on_success: change + on_failure: change +cache: pip + +dist: trusty + +env: + global: + - SCRIPT="test" + - TEST_ARGS="" + - COV_CORE_SOURCE=pytest_allclose # early start pytest-cov engine + - COV_CORE_CONFIG=.coveragerc + - COV_CORE_DATAFILE=.coverage.eager + - BRANCH_NAME="${TRAVIS_PULL_REQUEST_BRANCH:-$TRAVIS_BRANCH}" + - NUMPY="numpy" + +jobs: + include: + - + env: + SCRIPT="static" + - + env: + SCRIPT="test-coverage" + - + env: + NUMPY="numpy==1.11" + SCRIPT="test" + python: 3.5 + cache: False + - + env: + NUMPY="numpy==1.16" + SCRIPT="test" + python: 3.7 + dist: xenial + - + env: + SCRIPT="docs" + addons: + apt: + packages: + - pandoc + +before_install: + # export travis_terminate for use in scripts + - export -f travis_terminate + _travis_terminate_linux + _travis_terminate_osx + _travis_terminate_unix + _travis_terminate_windows + # upgrade pip + - pip install pip --upgrade + # install/run nengo-bones + - pip install nengo-bones + - bones-generate --output-dir .ci ci-scripts + - if [[ "$TRAVIS_PYTHON_VERSION" < "3.6" ]]; then + echo "Skipping bones-check because Python $TRAVIS_PYTHON_VERSION < 3.6"; + else + bones-check; + fi + # display environment info + - pip freeze + +install: + - .ci/$SCRIPT.sh install + - pip 
freeze + +after_install: + - .ci/$SCRIPT.sh after_install + +before_script: + - .ci/$SCRIPT.sh before_script + +script: + - .ci/$SCRIPT.sh script + +before_cache: + - .ci/$SCRIPT.sh before_cache + +after_success: + - .ci/$SCRIPT.sh after_success + +after_failure: + - .ci/$SCRIPT.sh after_failure + +before_deploy: + - .ci/$SCRIPT.sh before_deploy + +after_deploy: + - .ci/$SCRIPT.sh after_deploy + +after_script: + - .ci/$SCRIPT.sh after_script diff --git a/CHANGES.rst b/CHANGES.rst new file mode 100644 index 0000000..fac96a3 --- /dev/null +++ b/CHANGES.rst @@ -0,0 +1,26 @@ +*************** +Release History +*************** + +.. Changelog entries should follow this format: + + version (release date) + ====================== + + **section** + + - One-line description of change (link to Github issue/PR) + +.. Changes should be organized in one of several sections: + + - Added + - Changed + - Deprecated + - Removed + - Fixed + +1.0.0 (unreleased) +================== + +Initial release of ``pytest-allclose``! +Thanks to all of the contributors for making this possible! diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..c5bc42b --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,46 @@ +.. Automatically generated by nengo-bones, do not edit this file directly + +******************************* +Contributing to pytest-allclose +******************************* + +Issues and pull requests are always welcome! +We appreciate help from the community to make pytest-allclose better. + +Filing issues +============= + +If you find a bug in pytest-allclose, +or think that a certain feature is missing, +please consider +`filing an issue `_! +Please search the currently open issues first +to see if your bug or feature request already exists. +If so, feel free to add a comment to the issue +so that we know that multiple people are affected. + +Making pull requests +==================== + +If you want to fix a bug or add a feature to pytest-allclose, +we welcome pull requests. +Ensure that you fill out all sections of the pull request template, +deleting the comments as you go. +We check most aspects of code style automatically. +Please refer to our +`code style guide `_ +for things that we check manually. + +Contributor agreement +===================== + +We require that all contributions be covered under +our contributor assignment agreement. Please see +`the agreement `_ +for instructions on how to sign. + +More details +============ + +For more details on how to contribute to Nengo, +please see the `developer guide `_. diff --git a/CONTRIBUTORS.rst b/CONTRIBUTORS.rst new file mode 100644 index 0000000..09b1342 --- /dev/null +++ b/CONTRIBUTORS.rst @@ -0,0 +1,9 @@ +.. Automatically generated by nengo-bones, do not edit this file directly + +**************************** +pytest-allclose contributors +**************************** + +See https://www.nengo.ai/people/ for a list of +the many people who have worked on pytest-allclose; +thank you for your contributions! diff --git a/LICENSE.rst b/LICENSE.rst new file mode 100644 index 0000000..8317f08 --- /dev/null +++ b/LICENSE.rst @@ -0,0 +1,29 @@ +.. 
Automatically generated by nengo-bones, do not edit this file directly + +*********************** +pytest-allclose license +*********************** + +MIT License + +Copyright (c) 2019-2019 Applied Brain Research + +Permission is hereby granted, free of charge, +to any person obtaining a copy of this software +and associated documentation files (the "Software"), +to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..7988735 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,37 @@ +# Automatically generated by nengo-bones, do not edit this file directly + +global-include *.py +global-include *.sh +global-include *.template +include *.rst + +# Include files for CI and recreating the source dist +include *.yml +include *.yaml +include *.toml +include MANIFEST.in +include .gitlint +include .pylintrc + +# Directories to include +graft docs + +# Subdirectories to exclude, if they exist +prune docs/_build +prune dist +prune .git +prune .github +prune .tox +prune .eggs +prune .ci + +# Exclude auto-generated files +recursive-exclude docs *.py + +# Patterns to exclude from any directory +global-exclude *.ipynb_checkpoints* +global-exclude *-checkpoint.ipynb + +# Exclude all bytecode +global-exclude *.pyc *.pyo *.pyd + diff --git a/README.rst b/README.rst index 4024e97..766c6e5 100644 --- a/README.rst +++ b/README.rst @@ -2,5 +2,168 @@ pytest-allclose *************** -``pytest-allclose`` provides the ``allclose`` Pytest fixture, -extending Numpy's ``allclose`` function. +``pytest-allclose`` provides the `~.allclose` Pytest fixture, +extending `numpy.allclose` with test-specific features. + +A core feature of the `~.allclose` fixture +is that the tolerances for tests can be configured externally. +This allows different repositories to share the same tests, +but use different tolerances. +See the "Configuration" section below for details. + +Installation +============ + +To use this fixture, install with + +.. code-block:: bash + + pip install pytest-allclose + +Usage +===== + +The `~.allclose` fixture is used just like `numpy.allclose`. + +.. code-block:: python + + import numpy as np + + def test_close(allclose): + x = np.linspace(-1, 1) + y = x + 0.001 + assert allclose(y, x, atol=0.002) + assert not allclose(y, x, atol=0.0005) + assert not allclose(y, x, rtol=0.002) + +Additional arguments +-------------------- + +The `~.allclose` fixture has a number of arguments +that are not part of `numpy.allclose`. +One such argument is ``xtol``, +which allows arrays that have been shifted along their first axis +by a certain number of steps to be considered close. + +.. 
code-block:: python + + import numpy as np + + def test_close(allclose): + x = np.linspace(-1, 1) + + assert allclose(x[1:], x[:-1], xtol=1) + assert allclose(x[3:], x[:-3], xtol=3) + assert not allclose(x[3:], x[:-3], xtol=1) + +Refer to the `~.allclose` API reference for all additional arguments. + +RMSE error reporting +-------------------- + +The `~.allclose` fixture stores root-mean-square error values, +which can be reported in the pytest terminal summary. +To do so, put the following in your ``conftest.py`` file. + +.. code-block:: python + + from pytest_allclose import report_rmses + + def pytest_terminal_summary(terminalreporter): + report_rmses(terminalreporter) + +See the `~.report_rmses` API reference for more information. + +Configuration +============= + +allclose_tolerances +------------------- + +``allclose_tolerances`` accepts a list of test name patterns, +followed by values for any of the `~.allclose` parameters. +These values will override any values provided within the test function itself, +allowing multiple repositories to use the same test suite, +but with different tolerances. + +.. code-block:: ini + + allclose_tolerances = + test_file.py:test_function atol=0.3 # set atol for specific test + test_file.py:test_func* rtol=0.2 # set rtol for tests matching wildcard + test_file.py:* atol=0.1 rtol=0.3 # set both tols for all tests in file + test_*tion rtol=0.2 # set rtol for all matching tests in any file + test_function[True] atol=0.1 # set atol only for one parametrization + +The only special character recognized in these patterns +is the wildcard character ``*``, +which matches any group of zero or more characters. + +If the test is parametrized, +then a pattern like ``test_name[param0-param1]`` +will match specific parameter settings, +and ``test_name*`` will match all parameter settings. +Note that the latter will match any test that starts with ``test_name``. + +If a test has multiple `~.allclose` calls, +you can use multiple tolerance lines that match the same test +to set different values for the first, second, third, etc. calls. +If there are more `~.allclose` calls than tolerance lines, +the last tolerance line will be used for all remaining `~.allclose` calls. + +Example test file: + +.. code-block:: python + + def test_close(allclose): + x = np.linspace(-1, 1) + y = x + 0.001 + assert allclose(y, x) + assert not allclose(y, x) + +Example configuration file (``pytest.ini``, ``setup.cfg``): + +.. code-block:: ini + + allclose_tolerances = + test_close atol=0.002 # affects first allclose call + test_close atol=0.0005 # affects second allclose call + +.. note:: Different tolerance lines correspond to *calls* of the + function, not lines of code. If you have a ``for`` + loop that calls `~.allclose` 3 times, + each of these calls corresponds to a new tolerance line. + If you have a fourth `~.allclose` call, + you would need three tolerance lines for the three calls + in the ``for`` loop, then a fourth line for the last call. + +.. caution:: The patterns for multiple calls of allclose in a function + must be exactly the same. + This means that if you have specific values for one + parametrization and general values for others, + you must put the specific values first + or they will not have any effect. + + Good example, specific takes precedence: + + .. code-block:: ini + + allclose_tolerances = + test_close[True-1] atol=0.002 + test_close[True-1] atol=0.0005 + test_close* atol=0.001 + test_close* atol=0.0001 + + Bad example, general takes precedence: + + .. 
code-block:: ini + + allclose_tolerances = + test_close* atol=0.001 + test_close* atol=0.0001 + test_close[True-1] atol=0.002 + test_close[True-1] atol=0.0005 + +See the full +`documentation `__ +for the API reference. diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..d034ac95e66c9d18b06f7a127e9047daebdaa98d GIT binary patch literal 15086 zcmd5?3v^V)8NMNEutgK~^jLYAWOuX4W(nj)NFb1a4M{fHO`wKHwSd^Fw1N~5+WKNG zf+j&ydF3G}dGJaJNqtfESVNJAk5+3DP&_u_kpNO$1vHS5>G$8=33q4j&107h-E;o^ zXXeiTf8YFf?%bI>BS`^LpcES`L5E3OA|xqElBBROS3X3NzCu|DG+lq-8cAA)3Q?#7 z6KOVt%H^NIsg(a=VPWYyovy@Nf&B`-UO!gp)7!MgHsfe)Qqskk`1tm?gwfxE)1OFy zF-^-SuDuf;9?pCb_^8wJ-`nz%+{zS#IYNuaaV)z_hyo#Sy=as!lyh+xM`8%6(|2&ben7^w0*W1?LP;D!pTVpHR&v}zBP5m2tYT}11 z;g_RCzGD8W@ZXqUngpKfAm1r@Q<~yl7~R6Amds;@;RcbfnD00KoAcHTtFo0oQe`VU zC~~HJm9>JcO8Sr$nOfO{b6;X(A4?Uvs^$I4zqVlA&DD9O_k-U~NT(WaD%)V$z)Io{ z$lRaE*u!$R<*}$SCXw$YmFMn?^WT`W_NFRp*}WJ;-T=3YTAX?L`iyOC`S9jT6!)cx zhuM^uXR~q7q>J1sf68C{^*a6j8e8eF!D$Diopb9!Us|2A`%Cbaxfhy`v%8irVwQR< zGmVQD`Cq_%=QDrtPce>D0!u$CWqDRWC z|(rW40R?oVdyW!9~C%G@(IS(w4>G2WjG4Gq1`-~2}!N1Uv&u1#hV3*ekd zFN->M3u3-OWoMrs*B@ypqJTt_Nn@9+30>N2|CLqZ!mkzYS= z-b%g5o!T)XZ=}cC5dEf5FN$hS%dE}3S&jR+C24B&v1YT`Jm@O(Z!3&$ zwXZZB4$7|0{t38$O0iZ-=0;10&ZzfTha&bCHSYZu|ESg-&qp0I5?=1BxkD@s)`Lpk zu5<_H#=FtS#pkw0i@U%1$Ll8iP-t$wljCRC=Tm?Gj~e&*S&1bo+)oV|GUOI+ucrMu z{*Z5unX^4-IC!_HaBrVjlJP6l^_a(_-%R25TDm{@=WfqU0q_5+aBstVDi0i9fw=V> z%n>$eaaNZ5n}1eAt_8f$t8hP;U2mO)7|#{CBj0lS?YG~gBLmVQ^2b;+-5btI8-Mf1SYrgn z8Ie26OO!mkP5YC->-}^{sj_oY>R<=@BsJGVI{ zu)agTiRlYJUKjI?A+Vo`dpBDtC_4fXalTd7aiwRu`m(qCr-|L)`n!ANOyql8ZO#E5#pN`g(rjHSq2F=Ijl3vH|XT zA(lP$6_4x09q-Et=nK6%KLfmqTW`Jf$KG;Q+5s~W_$#30GcsOi`aY0hmlm@o`RjEW zYf2-8G}b6+JMtaDtW$F8HkWR4X_(OU4wr6nX>x^fnx&G4a_Z}(5ILXh)Bz4DR<>`H z7RcJ})Hh0!gYx*wC2MJ5fK!LK^z1-qzA;GFfp(`3mV)K{jn{a_L~F9u(@-16>;Na{62B(oEJ;0F#?61qwRInYU9s z@#QxPVF`nzLjvR!5+M-bhkHFA_p6~dg1#_UFz+^-pFiu1s1fE)Q%nrasTyGav}k~G zkM};xJe~!p^MOo?eM@y{HGxz%!1#G*p&Cc7wxHz0>4STu9l~LFYMb{s;y=3(thch z)OT6(bk#ldD~ew|rTjKvK8iCehxy*^+Odsn-kd6Cj5GSB{nW2;w%{(;`GT&!dingu z=nvS_ssCbWPu_7ETj#c(@T&bQvsVTozE+%T*n)PR=WXSBgQb#{#2##WF6IcEw)#P4 z8t=CMuUr%K3E!wUo~p8zZUoL*jwR>|n?wl7a-4R%#p);&~gK3c)|Yv$dzfze)`Db?-V$rHeCx&ruwxwpnv{xF^6S#E9} z1>6H1dr`q_GWM81xSiQK6&V?MgD?2Q4d(MT*0OmuQ(hcO&mHZF6i2r#18zIVe$28H z=WX&kvbN^1h>>pRM1M;*zTlHB`Ji7>)NM#JbUMxmhHAP^sV!bvPmpluT$>Db$yrh1>YEMJ`K#TmAFW^ zF1+i{3xIo4{!UDNZd|l-%--gVnDgHk{1Lj8uJ!`Alj1O=WI+n{m0D$Nw0-nLDc=Hv zk2hzB4I4Jt5BUG4`=9+6TO4q|=GYnQGf!jR|9g&)chql`*p&9A{bpUl_t?jLv^;@b zn_Xv32CjU+WqiZ{XG1^W*wDKGhnbK2&lh~MC2zEUkyCFy%iE1{=2&EMsPS_u{pS=>~jygxB7C9d?Db({C-xzPaysyz?kDLR6ZhYf_7xxYMGEW=Jm(X8!c<=O1s`!gt-xl;&sX~HO}Zz1 z`0Q@|p?s&V>m{C5xn=>rau12lp(@up^gQ)u{kO9Z{jE_MvmVlf-4p(y-tJLTeBiH7 z_;gQUqq_dawRa$4y@@-ogZR$T@+X#~?m~NLC%$ZubOhT&+kzde31Sa+bl`hO6GVax z2ncozg*XCH2C)YPJFbBY3=Ve0hN2A8m=^4KW--bjAuQMt2e}8b3-T-^7!t||BS1=n z*ToLW&e{U#J4Fa23=$y!BcbxeAKc%9@cYH}Td3{vo0dO!w;@uV3-99>VY>~o4?=yK zzDH2nZLUUjH`>Z4jI(5QA~rPMv~hc`sSM)w9<42?y{^huUVt^i&J~%jvdDxedN&Y` zrag`6Ro3!UtX)^Z?zFsq9QDyRx$|f9agOFys`e2&!w;}K4m(;OIbO=Bty^wBa`=Jy zrJZz^5%29*;@G`?>4uuzipfRhW52`P^81J8Y;vS79xr2qJMCL*+S9ed;Vq_OQ_F6e z8!wo(c~4etPKP1JK%Dm?RsqnO_J;7tHrQRj+^*%`N1lHU^NWjdQ{$Uqa|!SBc#3s* z_7UMxZ8(R$YI5bwS+H#%zih%k$QDwpxKEn+#CVcQNLiQJYnVL(*xpFUE55p(z#!t`OYQHRF$N0hcb*^_UI6ZCn= ztEjIw@72VI4XqiPA%}rYkDtd6*WB!p7oQ)x9w{0k4x3R<&lWvn^bG6+Mjf>B`8}fb z9!1^PQI|j8T)!Q7RFX(OULr{!1xivQWDibR?1MBWNYZ + diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..baa800e --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,73 @@ +# -*- 
coding: utf-8 -*- +# +# Automatically generated by nengo-bones, do not edit this file directly + +import os + +import pytest_allclose + +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.githubpages", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "nbsphinx", + "nengo_sphinx_theme", + "numpydoc", +] + +# -- sphinx.ext.autodoc +autoclass_content = "both" # class and __init__ docstrings are concatenated +autodoc_default_options = {"members": None} +autodoc_member_order = "bysource" # default is alphabetical + +# -- sphinx.ext.intersphinx +intersphinx_mapping = { + "nengo": ("https://www.nengo.ai/nengo/", None), + "numpy": ("https://docs.scipy.org/doc/numpy", None), + "python": ("https://docs.python.org/3", None), +} + +# -- sphinx.ext.todo +todo_include_todos = True + +# -- numpydoc config +numpydoc_show_class_members = False + +# -- nbsphinx +nbsphinx_timeout = -1 + +# -- sphinx +nitpicky = True +exclude_patterns = ["_build", "**/.ipynb_checkpoints"] +linkcheck_timeout = 30 +source_suffix = ".rst" +source_encoding = "utf-8" +master_doc = "index" +linkcheck_ignore = [r"http://localhost:\d+"] +linkcheck_anchors = True +default_role = "py:obj" +pygments_style = "sphinx" + +project = "pytest-allclose" +authors = "Applied Brain Research" +copyright = "2019-2019 Applied Brain Research" +version = ".".join(pytest_allclose.__version__.split(".")[:2]) # Short X.Y version +release = pytest_allclose.__version__ # Full version, with tags + +# -- HTML output +templates_path = ["_templates"] +html_static_path = ["_static"] +html_theme = "nengo_sphinx_theme" +html_title = "pytest-allclose {0} docs".format(release) +htmlhelp_basename = "pytest-allclose" +html_last_updated_fmt = "" # Default output format (suppressed) +html_show_sphinx = False +html_favicon = os.path.join("_static", "favicon.ico") +html_theme_options = { + "nengo_logo": "general-small-light.svg", + "nengo_logo_color": "#a8acaf", +} diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..a07b37d --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,9 @@ +.. include:: ../README.rst + :end-before: See the full + +API reference +============= + +.. autofunction:: pytest_allclose.plugin.allclose + +.. autofunction:: pytest_allclose.report_rmses diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..2410bfa --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,7 @@ +# Automatically generated by nengo-bones, do not edit this file directly + +[build-system] +requires = ["setuptools", "wheel"] + +[tool.black] +target-version = ['py35', 'py36', 'py37'] diff --git a/pytest_allclose/__init__.py b/pytest_allclose/__init__.py new file mode 100644 index 0000000..bab267c --- /dev/null +++ b/pytest_allclose/__init__.py @@ -0,0 +1,13 @@ +""" +pytest_allclose +=============== + +Pytest fixture extending Numpy's allclose function. +""" + +from .version import version as __version__ + +from .plugin import report_rmses + +__copyright__ = "2019-2019 pytest_plt contributors" +__license__ = "MIT license" diff --git a/pytest_allclose/plugin.py b/pytest_allclose/plugin.py new file mode 100644 index 0000000..0f70e0c --- /dev/null +++ b/pytest_allclose/plugin.py @@ -0,0 +1,229 @@ +"""The ``allclose`` fixture definition""" + +from fnmatch import fnmatch + +import numpy as np +import pytest + + +def _add_common_docs(func): + func.__doc__ += """ + Parameters + ---------- + a : np.ndarray + First array to be compared. 
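+        Scalars and lists are also accepted; inputs are passed through
+        ``np.atleast_1d`` before being compared.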
+ b : np.ndarray + Second array to be compared. + rtol : float, optional + Relative tolerance between a and b (relative to b). + atol : float, optional + Absolute tolerance between a and b. + xtol : int, optional + Allow signals to be right or left shifted by up to ``xtol`` + indices along the first axis + equal_nan : bool, optional + If True, nans will be considered equal to nans. + print_fail : int, optional + If > 0, print out the first ``print_fail`` entries failing + the allclose check along the first axis. + record_rmse : bool, optional + Whether to record the RMSE value for this comparison. Defaults to True. + Set to False whenever ``a`` and ``b`` should be far apart + (when ensuring two signals are sufficiently different, for example). + + Returns + ------- + bool + True if the two arrays are considered close according to the tolerances. + """ + return func + + +@pytest.fixture +@_add_common_docs +def allclose(request): + """Returns a function checking if two arrays are close, mimicking `numpy.allclose`. + + .. currentmodule:: allclose + + .. function:: _allclose(a, b, rtol=1e-5, atol=1e-8, xtol=0, equal_nan=False, \ + print_fail=5, record_rmse=True) + :noindex: + """ + + overrides = _get_allclose_overrides(request) + call_count = [0] + + @_add_common_docs + def _allclose( + a, + b, + rtol=1e-5, + atol=1e-8, + xtol=0, + equal_nan=False, + print_fail=5, + record_rmse=True, + ): + """Checks if two arrays are close, mimicking `numpy.allclose`. + """ + + if len(overrides) > 0: + override_args = overrides[min(call_count[0], len(overrides) - 1)] + atol = override_args.get("atol", atol) + rtol = override_args.get("rtol", rtol) + xtol = override_args.get("xtol", xtol) + equal_nan = override_args.get("equal_nan", equal_nan) + print_fail = override_args.get("print_fail", print_fail) + record_rmse = override_args.get("record_rmse", record_rmse) + call_count[0] += 1 + + a = np.atleast_1d(a) + b = np.atleast_1d(b) + + rmse = _safe_rms(a - b) + if record_rmse and not np.any(np.isnan(rmse)): + request.node.user_properties.append(("rmse", rmse)) + + ab_rms = _safe_rms(a) + _safe_rms(b) + rmse_relative = (2 * rmse / ab_rms) if ab_rms > 0 else np.nan + if not np.any(np.isnan(rmse_relative)): + request.node.user_properties.append(("rmse_relative", rmse_relative)) + + close = np.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan) + + # if xtol > 0, check that number of adjacent positions. If they are + # close, then we consider things close. 
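+        # For each shift i up to xtol, an entry also counts as close if it is
+        # close to the entry i steps earlier or later in the other array
+        # (i.e. a[i:] is compared against b[:-i], and a[:-i] against b[i:]).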
+ for i in range(1, xtol + 1): + close[i:] |= np.isclose( + a[i:], b[:-i], rtol=rtol, atol=atol, equal_nan=equal_nan + ) + close[:-i] |= np.isclose( + a[:-i], b[i:], rtol=rtol, atol=atol, equal_nan=equal_nan + ) + + # we assume that the beginning and end of the array are close + # (since we're comparing to entries outside the bounds of + # the other array) + close[[i - 1, -i]] = True + + result = np.all(close) + + if print_fail > 0 and not result: + diffs = [] + # broadcast a and b to have same shape as close for indexing + broadcast_a = a + np.zeros(b.shape, dtype=a.dtype) + broadcast_b = b + np.zeros(a.shape, dtype=b.dtype) + for k, ind in enumerate(zip(*(~close).nonzero())): + if k >= print_fail: + break + diffs.append("%s: %s %s" % (ind, broadcast_a[ind], broadcast_b[ind])) + + print( + "allclose first %d failures:\n %s" % (len(diffs), "\n ".join(diffs)) + ) + + return result + + return _allclose + + +_allclose_arg_types = dict( + atol=float, rtol=float, xtol=int, equal_nan=bool, print_fail=int, record_rmse=bool +) + + +def _rms(x, axis=None, keepdims=False): + return np.sqrt(np.mean(x ** 2, axis=axis, keepdims=keepdims)) + + +def _safe_rms(x): + x = np.asarray(x) + return _rms(x).item() if x.size > 0 else np.nan + + +def _get_allclose_overrides(request): + nodename = request.node.nodeid + tol_cfg = request.config.inicfg.get("allclose_tolerances", "") + + # multiple overrides will match subsequent ``allclose`` calls + overrides = [] + + # we only match one pattern, the first one encountered + matched = None + + for line in (x for x in tol_cfg.split("\n") if len(x) > 0): + # each line contains a pattern and list of kwargs (e.g. `atol=0.1`) + split_line = line.split() + pattern = "*" + split_line[0] + + # escape special characters in pattern + replace = {"[": "[[]", "]": "[]]", "?": "[?]"} + pattern = "".join(replace.get(s, s) for s in pattern) + + if matched is None and fnmatch(nodename, pattern) or pattern == matched: + matched = pattern + kwargs = {} + for entry in split_line[1:]: + if entry.startswith("#"): + break + + k, v = entry.split("=") + if k not in _allclose_arg_types: + raise ValueError("Unrecognized argument %r" % k) + + kwargs[k] = _allclose_arg_types[k](v) + + overrides.append(kwargs) + + return overrides + + +def report_rmses(terminalreporter, relative=True): + """Report RMSEs recorded by the allclose fixture in the Pytest terminal. + + This function helps with reporting recorded root mean squared errors + (RMSEs). These RMSEs offer a measure of performance for each test + by quantifying how close their outputs are to the target values. + While this metric has some value on its own, it is most useful as a + relative metric, to evaluate if change offers an improvement to tests, + and if so, how much. + + When using RMSEs, it is important to set ``record_rmse`` to False on any + ``allclose`` call where closer values correspond to a drop in performance + (e.g. when using ``allclose`` to ensure values are different). + + Parameters + ---------- + terminalreporter : _pytest.terminal.TerminalReporter + The terminal reporter object provided by ``pytest_terminal_summary``. + relative : bool, optional + Whether to print relative (default) or absolute RMSEs. Relative RMSEs + are normalized by the mean RMS of ``a`` and ``b`` in ``allclose``. + Since different tests often compare against values of different + magnitudes, relative RMSEs provide a better metric across tests by + ensuring all tests contribute proportionally to the average RMSE. 
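+        Concretely, the value recorded for each ``allclose`` call is
+        ``2 * rms(a - b) / (rms(a) + rms(b))``.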
+ One exception is when comparing to a signal that is all zeros, since + the relative RMSE will always be 2 no matter how close the values are. + + Examples + -------- + See `RMSE error reporting`_ for an example. + """ + + rmse_name = "rmse_relative" if relative else "rmse" + + tr = terminalreporter + all_rmses = [] + for passed_test in tr.stats.get("passed", []): + for name, val in passed_test.user_properties: + if name == rmse_name: + all_rmses.append(val) + + if len(all_rmses) > 0: + relstr = "relative " if relative else "" + tr.write_sep("=", "%sroot mean squared error for allclose checks" % relstr) + tr.write_line( + "mean %sRMSE: %.5f +/- %.4f (std)" + % (relstr, np.mean(all_rmses), np.std(all_rmses)) + ) diff --git a/pytest_allclose/tests/test_allclose.py b/pytest_allclose/tests/test_allclose.py new file mode 100644 index 0000000..d832a4c --- /dev/null +++ b/pytest_allclose/tests/test_allclose.py @@ -0,0 +1,131 @@ +"""Test the allclose fixture.""" + +import inspect + +import numpy as np +import pytest + + +def eye_vector(n, k, dtype=bool): + return np.eye(1, n, k=k, dtype=dtype)[0] + + +def add_close_noise(x, atol, rtol, rng): + scale = rng.uniform(-rtol, rtol, size=x.shape) + offset = rng.uniform(-atol, atol, size=x.shape) + return x + scale * np.abs(x) + offset + + +def get_vector_pairs(atol, rtol, rng): + x = rng.uniform(-1, 1, size=100) + + # augment the whole vector by less than the tolerances (should pass) + y = add_close_noise(x, atol=atol, rtol=rtol, rng=rng) + + # augment a random element by more then `atol` (should fail) + i = rng.randint(x.size) + mask = eye_vector(x.size, i) + x0 = x * (~mask) + y0 = x0 + 1.1 * atol * mask + + # augment the largest magnitude element by more than rtol (should fail) + i = np.argmax(np.abs(x)) + assert np.abs(x[i]) * rtol > atol + x1 = x + y1 = x1 + 3 * rtol * x1[i] * eye_vector(x.size, i) + + # return pairs of x (reference) and y (augmented) values, + # and whether they are close according to the tolerances + return [(x, y, True), (x0, y0, False), (x1, y1, False)] + + +def test_allclose(allclose): + rng = np.random.RandomState(3) + atol = 1e-5 + rtol = 1e-3 + + pairs = get_vector_pairs(atol, rtol, rng) + for x, y, close in pairs: + assert allclose(y, x, atol=atol, rtol=rtol) == close + + +def test_tolerances_big(allclose): + """Make sure tolerances in setup.cfg are properly applied""" + rng = np.random.RandomState(4) + + # atol=0.01 and rtol=0.2 has been set in setup.cfg. + pairs = get_vector_pairs(atol=0.01, rtol=0.2, rng=rng) + for x, y, close in pairs: + assert allclose(y, x) == close + + +def test_tolerances_small(allclose): + """Make sure tolerances in setup.cfg are properly applied""" + rng = np.random.RandomState(5) + + # atol=0.001 and rtol=0.005 has been set in setup.cfg. 
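+    # get_vector_pairs builds pairs right at these tolerances, so this only
+    # passes if the configured overrides are actually applied.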
+ pairs = get_vector_pairs(atol=0.001, rtol=0.005, rng=rng) + for x, y, close in pairs: + assert allclose(y, x) == close + + +@pytest.mark.parametrize("big_tols", [False, True]) +def test_parametrized(big_tols, allclose): + rng = np.random.RandomState(6) + atol, rtol = (0.1, 0.2) if big_tols else (0.001, 0.002) + + pairs = get_vector_pairs(atol, rtol, rng) + for x, y, close in pairs: + assert allclose(y, x) == close + + +@pytest.mark.parametrize("order", [1, 2, 3, 4]) +def test_precedence(order, allclose): + closure_vars = inspect.getclosurevars(allclose) + overrides = closure_vars.nonlocals["overrides"] + assert len(overrides) == 1 + + pairs = get_vector_pairs(order, order * 2, np.random.RandomState(6)) + if order in (1, 2): + assert overrides[0] == dict(atol=order, rtol=order * 2) + for x, y, close in pairs: + assert allclose(y, x) == close + else: + assert overrides[0] == dict(atol=2, rtol=4) + x, y, _ = pairs[0] + assert not allclose(y, x) + + +def test_multiple_tolerances(allclose): + # setup.cfg specifies separate first, second, and third tolerances + rng = np.random.RandomState(7) + + x0, y0, close0 = get_vector_pairs(atol=0.001, rtol=0.004, rng=rng)[0] + assert close0 and allclose(y0, x0) + + x1, y1, close1 = get_vector_pairs(atol=0.01, rtol=0.05, rng=rng)[0] + assert close1 and allclose(y1, x1) + + # go back to smaller tols, to test we're not just using last tols + x2, y2, close2 = get_vector_pairs(atol=0.002, rtol=0.005, rng=rng)[0] + assert close2 and allclose(y2, x2) + + +def test_xtol(allclose): + x = np.linspace(-1, 1) + dx = x[1] - x[0] + y = [x + i * dx for i in range(4)] + + assert allclose(y[1], x, atol=1e-8, rtol=1e-8, xtol=1) + assert not allclose(y[2], x, atol=1e-8, rtol=1e-8, xtol=1) + assert allclose(y[2], x, atol=1e-8, rtol=1e-8, xtol=2) + assert not allclose(y[3], x, atol=1e-8, rtol=1e-8, xtol=2) + + +def test_docstring(allclose): + assert allclose.__doc__.startswith( + "Checks if two arrays are close, mimicking `numpy.allclose`." + ) + + assert "Parameters\n ----------" in allclose.__doc__ + assert "Returns\n -------" in allclose.__doc__ diff --git a/pytest_allclose/tests/test_pytest.py b/pytest_allclose/tests/test_pytest.py new file mode 100644 index 0000000..42bbcf2 --- /dev/null +++ b/pytest_allclose/tests/test_pytest.py @@ -0,0 +1,160 @@ +import re +from textwrap import dedent + +import numpy as np +import pytest + + +pytest_plugins = ["pytester"] # adds the `testdir` fixture + + +def assert_all_passed(result): + """Assert that all outcomes are 0 except for 'passed'. + + Also returns the number of passed tests. 
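+    ("seconds" comes from the run time in pytest's summary line, not a test
+    outcome, so it is ignored here.)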
+ """ + outcomes = result.parseoutcomes() + for outcome in outcomes: + if outcome not in ("passed", "seconds"): + assert outcomes[outcome] == 0 + return outcomes.get("passed", 0) + + +@pytest.mark.parametrize("offsets", [(0.001,), (0.001, 0.002)]) +@pytest.mark.parametrize("relative", [False, True]) +def test_rmse_output(offsets, relative, testdir): + testdir.makeconftest( + dedent( + """\ + from pytest_allclose import report_rmses + + def pytest_terminal_summary(terminalreporter): + report_rmses(terminalreporter, relative={relative}) + """.format( + relative=relative + ) + ) + ) + + testdir.makefile( + ".py", + test_rmse_output=dedent( + """\ + import numpy as np + import pytest + + @pytest.mark.parametrize('offset', [{offsets}]) + def test_rmse(offset, allclose): + x = np.linspace(-1, 1) + y = x + offset + assert allclose(y, x, atol=offset + 1e-8) + """.format( + offsets=", ".join(str(x) for x in offsets) + ) + ), + ) + + result = testdir.runpytest("-v") + n_passed = assert_all_passed(result) + assert n_passed > 0 + + tag = "mean %sRMSE: " % ("relative " if relative else "") + lines = [s[len(tag) :] for s in result.outlines if s.startswith(tag)] + assert len(lines) == 1 + line = lines[0] + parts = line.split() + assert len(parts) == 4 and parts[1] == "+/-" and parts[3] == "(std)" + mean, std = float(parts[0]), float(parts[2]) + + x = np.linspace(-1, 1) + x_rms = np.sqrt(np.mean(x ** 2)) + rmses = [offset / x_rms for offset in offsets] if relative else offsets + assert np.allclose(mean, np.mean(rmses), atol=1e-4) + assert np.allclose(std, np.std(rmses), atol=1e-4) + + +@pytest.mark.parametrize("rel_error, print_fail", [(1.00101, 5), (1.0011, 6)]) +def test_print_fail_output(rel_error, print_fail, testdir): + testdir.makefile( + ".py", + test_print_fail_output=dedent( + """\ + import numpy as np + + def test_print_fail(allclose): + t = np.linspace(0, 1) + x = np.sin(2*np.pi*t) + y = x * {rel_error} + assert allclose(y, x, atol=0.001, rtol=0, print_fail={print_fail:d}) + """.format( + rel_error=rel_error, print_fail=print_fail + ) + ), + ) + + result = testdir.runpytest("-v") + outcomes = result.parseoutcomes() + assert outcomes.get("passed", 0) == 0 and outcomes.get("failed", 0) == 1 + + # reference values + ref_inds = (12, 13, 36, 37) + t = np.linspace(0, 1) + x = np.sin(2 * np.pi * t) + y = x * rel_error + ref_inds = (~np.isclose(y, x, atol=0.001, rtol=0)).nonzero()[0] + + # parse output lines + pattern = "allclose first ([0-9]+) failures" + line_inds = [i for i, s in enumerate(result.outlines) if re.match(pattern, s)] + assert len(line_inds) == 1 + line_ind = line_inds[0] + n_lines = int(re.match(pattern, result.outlines[line_ind]).groups()[0]) + lines = result.outlines[line_ind + 1 : line_ind + n_lines + 1] + assert n_lines == min(print_fail, len(ref_inds)) + + # match lines indicating not-close values, extract the indices and values + pattern = r"[ ]*\(([0-9, ]+)\): (\S+) (\S+)" + parsed = [] + for line in lines: + match = re.match(pattern, line) + assert match + groups = match.groups() + parsed.append( + ( + tuple(int(s) for s in groups[0].split(",") if len(s) > 0), + float(groups[1]), + float(groups[2]), + ) + ) + + # check output lines against reference values + for ref_ind, parts in zip(ref_inds, parsed): + assert parts[0] == (ref_ind,) + assert np.allclose(parts[1], y[ref_ind]) + assert np.allclose(parts[2], x[ref_ind]) + + +def test_bad_override_parameter(testdir): + testdir.makeini( + dedent( + """\ + [pytest] + allclose_tolerances = + test_dummy notaparam=3 + """ + ) + ) + + 
testdir.makefile( + ".py", + test_bad_override_parameter=dedent( + """\ + def test_dummy(allclose): + assert allclose(1, 1) + """ + ), + ) + + result = testdir.runpytest("-v") + outcomes = result.parseoutcomes() + assert outcomes.get("passed", 0) == 0 and outcomes.get("error", 0) == 1 diff --git a/pytest_allclose/version.py b/pytest_allclose/version.py new file mode 100644 index 0000000..2f59d31 --- /dev/null +++ b/pytest_allclose/version.py @@ -0,0 +1,16 @@ +"""pytest-allclose version information. + +We use semantic versioning (see http://semver.org/). +and conform to PEP440 (see https://www.python.org/dev/peps/pep-0440/). +'.devN' will be added to the version unless the code base represents +a release version. Release versions are git tagged with the version. +""" + +name = "pytest_allclose" +version_info = (1, 0, 0) # (major, minor, patch) +dev = 0 + +version = "{v}{dev}".format( + v=".".join(str(v) for v in version_info), + dev=(".dev%d" % dev) if dev is not None else "", +) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..cb770f7 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,114 @@ +# Automatically generated by nengo-bones, do not edit this file directly + +[build_sphinx] +source-dir = docs +build-dir = docs/_build +all_files = 1 + +[coverage:run] +source = pytest_allclose + +[coverage:report] +# Regexes for lines to exclude from consideration +exclude_lines = + # Have to re-enable the standard pragma + # place ``# pragma: no cover`` at the end of a line to ignore it + pragma: no cover + + # Don't complain if tests don't hit defensive assertion code: + raise NotImplementedError + + # `pass` is just a placeholder, fine if it's not covered + ^[ \t]*pass$ + + +# Patterns for files to exclude from reporting +omit = + */tests/test* + +[flake8] +exclude = + __init__.py +ignore = + E123 + E133 + E203 + E226 + E241 + E242 + E501 + E731 + F401 + W503 +max-complexity = 10 +max-line-length = 88 + +[tool:pytest] +xfail_strict = False +norecursedirs = + .* + *.egg + build + dist + docs +allclose_tolerances = + test_tolerances_big atol=0.01 rtol=0.2 + test_tolerances_small atol=0.001 rtol=0.005 # test comment + test_parametrized[True] atol=0.1 rtol=0.2 + test_parametrized[False] atol=0.001 rtol=0.002 + test_precedence[1] atol=1 rtol=2 + test_precedence* atol=2 rtol=4 + test_precedence[3] atol=3 rtol=6 + test_multiple_tolerances atol=0.001 rtol=0.004 + test_multiple_tolerances atol=0.01 rtol=0.05 + test_multiple_tolerances atol=0.002 rtol=0.005 + +[pylint] +# note: pylint doesn't look in setup.cfg by default, need to call it with +# `pylint ... 
--rcfile=setup.cfg` +disable = + arguments-differ, + assignment-from-no-return, + attribute-defined-outside-init, + bad-continuation, + blacklisted-name, + comparison-with-callable, + duplicate-code, + fixme, + import-error, + invalid-name, + invalid-sequence-index, + len-as-condition, + literal-comparison, + no-else-raise, + no-else-return, + no-member, + no-name-in-module, + no-self-use, + not-an-iterable, + not-context-manager, + protected-access, + redefined-builtin, + stop-iteration-return, + too-few-public-methods, + too-many-arguments, + too-many-branches, + too-many-instance-attributes, + too-many-lines, + too-many-locals, + too-many-return-statements, + too-many-statements, + unexpected-keyword-arg, + unidiomatic-typecheck, + unsubscriptable-object, + unsupported-assignment-operation, + unused-argument, +known-third-party = + matplotlib, + nengo, + numpy, + pytest, +max-line-length = 88 +valid-metaclass-classmethod-first-arg = metacls +reports = no +score = no diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..6b9c2df --- /dev/null +++ b/setup.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +# Automatically generated by nengo-bones, do not edit this file directly + +import io +import os +import runpy + +try: + from setuptools import find_packages, setup +except ImportError: + raise ImportError( + "'setuptools' is required but not installed. To install it, " + "follow the instructions at " + "https://pip.pypa.io/en/stable/installing/#installing-with-get-pip-py" + ) + + +def read(*filenames, **kwargs): + encoding = kwargs.get("encoding", "utf-8") + sep = kwargs.get("sep", "\n") + buf = [] + for filename in filenames: + with io.open(filename, encoding=encoding) as f: + buf.append(f.read()) + return sep.join(buf) + + +root = os.path.dirname(os.path.realpath(__file__)) +version = runpy.run_path(os.path.join(root, "pytest_allclose", "version.py"))["version"] + +install_req = ["numpy>=1.11", "pytest"] +docs_req = ["nengo_sphinx_theme>=1.0", "sphinx"] +optional_req = [] +tests_req = ["codespell", "coverage>=4.3", "flake8", "gitlint", "pylint"] + +setup( + name="pytest-allclose", + version=version, + author="Applied Brain Research", + author_email="info@appliedbrainresearch.com", + packages=find_packages(), + url="https://www.nengo.ai/pytest-allclose", + include_package_data=False, + license="MIT license", + description="Pytest fixture extending Numpy's allclose function", + long_description=read("README.rst", "CHANGES.rst"), + zip_safe=False, + install_requires=install_req, + extras_require={ + "all": docs_req + optional_req + tests_req, + "docs": docs_req, + "optional": optional_req, + "tests": tests_req, + }, + python_requires=">=3.5", + entry_points={"pytest11": ["allclose = pytest_allclose.plugin"]}, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Framework :: Pytest", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + ], +)
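
A note on the override mechanism added in ``pytest_allclose/plugin.py``:
``_allclose_arg_types`` shows that ``allclose_tolerances`` lines may override
not only ``atol`` and ``rtol`` but also ``xtol``, ``equal_nan``, ``print_fail``,
and ``record_rmse``. A minimal sketch of such a configuration in a downstream
project's ``setup.cfg`` (the test names and values here are hypothetical,
for illustration only):

.. code-block:: ini

    [tool:pytest]
    allclose_tolerances =
        test_shifted_signal* xtol=2 rtol=0.05  # hypothetical: allow a 2-step shift
        test_with_nans atol=0.01 equal_nan=True  # hypothetical: NaNs compare equal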