diff --git a/ExampleFiles/test_11.py b/ExampleFiles/test_11.py
index d6a3c66..8b8a99a 100644
--- a/ExampleFiles/test_11.py
+++ b/ExampleFiles/test_11.py
@@ -40,4 +40,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_2.py b/ExampleFiles/test_2.py
index b7e80c9..029e915 100644
--- a/ExampleFiles/test_2.py
+++ b/ExampleFiles/test_2.py
@@ -28,4 +28,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_3.py b/ExampleFiles/test_3.py
index 2c7f23c..ac26153 100644
--- a/ExampleFiles/test_3.py
+++ b/ExampleFiles/test_3.py
@@ -27,4 +27,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_4.py b/ExampleFiles/test_4.py
index e579c28..17a6f4c 100644
--- a/ExampleFiles/test_4.py
+++ b/ExampleFiles/test_4.py
@@ -27,4 +27,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_5.py b/ExampleFiles/test_5.py
index 8890a5b..a9ce09e 100644
--- a/ExampleFiles/test_5.py
+++ b/ExampleFiles/test_5.py
@@ -28,4 +28,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_6.py b/ExampleFiles/test_6.py
index 886badb..e32108d 100644
--- a/ExampleFiles/test_6.py
+++ b/ExampleFiles/test_6.py
@@ -30,4 +30,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_8.py b/ExampleFiles/test_8.py
index ce01440..672d67c 100644
--- a/ExampleFiles/test_8.py
+++ b/ExampleFiles/test_8.py
@@ -27,4 +27,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
diff --git a/ExampleFiles/test_9.py b/ExampleFiles/test_9.py
index 7341e11..e5c2e26 100644
--- a/ExampleFiles/test_9.py
+++ b/ExampleFiles/test_9.py
@@ -27,4 +27,4 @@ def test_pytest(): #note that it cannot have any required arguments for pytest t
 if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself.
-    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True)
+    ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, interactiveTesting=True)
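Note: all of the example files changed above follow one pattern, so for context a minimal sketch of such a test_N.py is shown below. The result values are placeholders and the import convention is assumed from the ExampleFiles; this is not a verbatim copy of any file in the patch.

```python
# Minimal sketch of the test_N.py pattern touched above. The result values are
# placeholders; the real examples compute resultObj from the function under test.
import UnitTesterSG as ut  # assumed import convention from the ExampleFiles

prefix = ''
suffix = '1'  # matches the digit in the file name, e.g. test_1.py

resultObj = [1, 2.0, 'three']  # placeholder calculated result
resultStr = str(resultObj)     # string form, compared alongside the object

def test_pytest():  # no required arguments, so pytest can collect and call it
    ut.doTest(resultObj, resultStr, prefix=prefix, suffix=suffix, allowOverwrite=False)

if __name__ == "__main__":  # interactive run: may prompt to store/overwrite expected results
    ut.doTest(resultObj, resultStr, prefix=prefix, suffix=suffix,
              allowOverwrite=True, interactiveTesting=True)
```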
diff --git a/README.txt b/README.txt
index 56e58a6..85a9811 100644
--- a/README.txt
+++ b/README.txt
@@ -1,4 +1,4 @@
-UnitTesterSG, version 5.1.4 as of Nov 1 2020
+UnitTesterSG
 
 The LICENSE and MANUAL are in the UnitTesterSG directory, and at https://github.com/AdityaSavara/UnitTesterSG/tree/master/UnitTesterSG
@@ -38,13 +38,13 @@ Note: For any individual test, set allowOverwrite to False when calling doTest i
 
 STORING EXPECTED RESULTS:
 
-Initially, there will be no "expected results" to check against. You can create an expected results file ahead of time by using the set_expected_results function. More typically, you would first check that your function works ahead of time, manually, and then make test_1.py, test_2.py, etc. files (see examples). Then, when running UnitTesterSG you would choose the "Y" option to store the (already checked) results as the "Expected" results, when prompted. This way, they will be available for for later in the future. It is important that the function can work with "pickling" objects, which stores them in a way that they can be retrieved even after the program has ended. Thus, during running process, the module does check that it was able to properly pickle and retreive objects, by comparing whether the objects before and afer pickling are the same.
+Initially, there will be no "expected results" to check against. You can create an expected results file ahead of time by using the set_expected_results function. More typically, you would first check that your function works ahead of time, manually, and then make test_1.py, test_2.py, etc. files (see examples) with interactiveTesting=True. Then, when running UnitTesterSG you would choose the "Y" option to store the (already checked) results as the "Expected" results, when prompted. This way, they will be available for later use. It is important that the result objects can be "pickled", which stores them in a way that they can be retrieved even after the program has ended. Thus, during the run, the module checks that it was able to properly pickle and retrieve objects, by comparing whether the objects before and after pickling are the same.
 
 COMPARING TO PREVIOUSLY STORED EXPECTED RESULTS:
 
 Later, in the future, after you edit your function, you can check the revised function's outputs against those stored results by making a copy of the full subdirectory where you did the testing and replacing the version of your module in the fresh copy. Note that your (old) stored results will be in the freshly copied directory, so the unit tester will then compare your revised function's output to the stored output.
 
-One can run pytestDriver from a root directory, and then it will use pytest for all of the UnitTesterSG tests automatically to make sure they still pass (relative to stored results). The underscore in the file names are partially to be compatible with pytest. So essentially one can have a UnitTests directory with subdirectories containing the unit test files for various functions, and then can place runUnitTesterSG.py or runPytestDriver.py for running at that time.
+One can run pytestDriver from a root directory, and then it will use pytest for all of the UnitTesterSG tests automatically to make sure they still pass (relative to stored results). The underscore in the file names is partly for compatibility with pytest. So essentially one can have a UnitTests directory with subdirectories containing the unit test files for various functions, and then place runUnitTesterSG.py or runPytestDriver.py in the main directory.
 
 Note: For any individual test, set allowOverwrite to False when calling doTest if you want to skip UnitTesterSG from stopping to notify user when results match but strings don't.
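Note: the README paragraph above mentions set_expected_results for pre-seeding expected results without the interactive "Y" prompt. A hedged sketch of that call follows; the argument order and names are assumptions, not confirmed from the source, so check the function's definition in UnitTesterSGFunctions.py before relying on it.

```python
# Hypothetical sketch of pre-seeding expected results; the argument names and
# order are assumptions, not confirmed against set_expected_results' signature.
import UnitTesterSG as ut

expectedObj = [1, 2.0, 'three']
ut.set_expected_results(expectedObj, str(expectedObj), prefix='', suffix='1')
```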
diff --git a/UnitTesterSG/MANUAL.txt b/UnitTesterSG/MANUAL.txt
index 4351a07..e107d5d 100644
--- a/UnitTesterSG/MANUAL.txt
+++ b/UnitTesterSG/MANUAL.txt
@@ -1,10 +1,10 @@
-UnitTesterSG, version 5.0.4 as of June 14 2020
+UnitTesterSG
 
 The LICENSE and MANUAL are in the UnitTesterSG directory, and at https://github.com/AdityaSavara/UnitTesterSG/tree/master/UnitTesterSG
 
 QUICK INTRO:
 
-UnitTesterSG is for unit testing scientific/engineering outputs. It accommodates nested and even staggered array type structures, and mixed data types (like strings and integers etc). It also enables you to set numerical and absolute tolerances.
+UnitTesterSG is for unit testing scientific/engineering outputs. It accommodates nested and even staggered array type structures, and mixed data types (such as strings and integers). It also enables you to set relative and absolute tolerances. It is compatible with pytest.
 
 Easiest to install by pip UnitTesterSG. (https://pypi.org/project/UnitTesterSG/) , then navigate download from https://github.com/AdityaSavara/UnitTesterSG/ to run the examples.
@@ -38,13 +38,14 @@ Note: For any individual test, set allowOverwrite to False when calling doTest i
 
 STORING EXPECTED RESULTS:
 
-Initially, there will be no "expected results" to check against. You can create an expected results file ahead of time by using the set_expected_results function. More typically, you would first check that your function works ahead of time, manually, and then make test_1.py, test_2.py, etc. files (see examples). Then, when running UnitTesterSG you would choose the "Y" option to store the (already checked) results as the "Expected" results, when prompted. This way, they will be available for for later in the future. It is important that the function can work with "pickling" objects, which stores them in a way that they can be retrieved even after the program has ended. Thus, during running process, the module does check that it was able to properly pickle and retreive objects, by comparing whether the objects before and afer pickling are the same.
+Initially, there will be no "expected results" to check against. You can create an expected results file ahead of time by using the set_expected_results function. More typically, you would first check that your function works ahead of time, manually, and then make test_1.py, test_2.py, etc. files (see examples) with interactiveTesting=True. Then, when running UnitTesterSG you would choose the "Y" option to store the (already checked) results as the "Expected" results, when prompted. This way, they will be available for later use. It is important that the result objects can be "pickled", which stores them in a way that they can be retrieved even after the program has ended. Thus, during the run, the module checks that it was able to properly pickle and retrieve objects, by comparing whether the objects before and after pickling are the same.
+
 
 COMPARING TO PREVIOUSLY STORED EXPECTED RESULTS:
 
 Later, in the future, after you edit your function, you can check the revised function's outputs against those stored results by making a copy of the full subdirectory where you did the testing and replacing the version of your module in the fresh copy. Note that your (old) stored results will be in the freshly copied directory, so the unit tester will then compare your revised function's output to the stored output.
 
-One can run pytestDriver from a root directory, and then it will use pytest for all of the UnitTesterSG tests automatically to make sure they still pass (relative to stored results). The underscore in the file names are partially to be compatible with pytest. So essentially one can have a UnitTests directory with subdirectories containing the unit test files for various functions, and then can place runUnitTesterSG.py or runPytestDriver.py for running at that time.
+One can run pytestDriver from a root directory, and then it will use pytest for all of the UnitTesterSG tests automatically to make sure they still pass (relative to stored results). The underscore in the file names is partly for compatibility with pytest. So essentially one can have a UnitTests directory with subdirectories containing the unit test files for various functions, and then place runUnitTesterSG.py or runPytestDriver.py in the main directory.
 
 Note: For any individual test, set allowOverwrite to False when calling doTest if you want to skip UnitTesterSG from stopping to notify user when results match but strings don't.
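Note: both documents describe a pickle round-trip check. As an illustration, that check amounts to something like the simplified sketch below; this is illustrative only, not the module's actual code.

```python
# Simplified sketch of a pickle round-trip check, as described in the README/MANUAL.
# Illustrative only; the module's actual implementation may differ.
import pickle

def survives_pickling(obj):
    restored = pickle.loads(pickle.dumps(obj))  # serialize, then deserialize
    return str(restored) == str(obj)            # compare before/after representations

print(survives_pickling([1, 2.0, 'three']))  # True for picklable, comparable objects
```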
diff --git a/UnitTesterSG/UnitTesterSGFunctions.py b/UnitTesterSG/UnitTesterSGFunctions.py
index 8264f73..d3dd7fc 100644
--- a/UnitTesterSG/UnitTesterSGFunctions.py
+++ b/UnitTesterSG/UnitTesterSGFunctions.py
@@ -6,6 +6,7 @@
 from UnitTesterSG.nestedObjectsFunctions import *
 import pickle
 import os
+import sys
 
 '''
 This function takes in two arrarys (or iterables) and compares them using functions from the nestedObjectsFunctions module
@@ -181,18 +182,20 @@ def check_results(calculated_resultObj,calculated_resultStr='',prefix='',suffix=
             stringMatch = True
         else: #implies that expected results string does not match calculated result string.
             stringMatch = False
+            print('Expected result string and calculated_result string DO NOT MATCH. \nThe compared strings are in files', expected_resultStr_file, calculated_resultStr_file)
     if (objectMatch == False) and (stringMatch == True):
         print("Warning: Strings can match for long/large arrays even if objects don't, due to '...'")
-    if (objectMatch == False) or (stringMatch == False): #if either object or string comparison failed, we consider overwriting old files.
-        #the if statement is to prevent pytest from needing user input. Perhaps should be changed to "interactiveTesting = True" rather than allowOverwrite = True.
-        if allowOverwrite==True or interactiveTesting==True:
+    if (stringMatch == False):
+        if (interactiveTesting==True and objectMatch == False): #we only consider printing the strings if the objectMatch is false.
             if expected_resultStr_read!=calculated_resultStr_read: #We give the option the user to print out the strings if the string comparison failed.
                 printStringsChoice=str(input('Expected result string does not match calculated_result string. Would you like to print them here now to inspect (Y or N)?'))
                 if str(printStringsChoice) == 'Y':
                     print('Expected result string (top) DOES NOT MATCH calculated_result string (bottom)')
                     print(expected_resultStr_read)
                     print(calculated_resultStr_read)
-    if allowOverwrite==True:
+    if (objectMatch == False): #if the object comparison failed, we consider overwriting old files.
+        #the interactiveTesting gate below prevents pytest from needing user input.
+        if allowOverwrite==True and interactiveTesting==True:
             overwritechoice=str(input('Overwrite (or create) the expected result object and string files from the calculated results provided (Y or N)? '))
             if str(overwritechoice)=='Y':
                 #pickling the calculated result into the expected result file
@@ -226,16 +229,16 @@ def returnDigitFromFilename(currentFile):
     extractedDigit = listOfNumbers[0]
     return extractedDigit
 
-def doTest(resultObj, resultStr, prefix='',suffix='', allowOverwrite = False, relativeTolerance=None, absoluteTolerance=None, softStringCompare=False):
+def doTest(resultObj, resultStr, prefix='',suffix='', allowOverwrite = False, relativeTolerance=None, absoluteTolerance=None, softStringCompare=False, interactiveTesting=False):
     #if the user wants to be able to change what the saved outputs are
     if allowOverwrite:
         #This function call is used when this test is run solo as well as by UnitTesterSG
-        check_results(resultObj, resultStr, prefix = '', suffix=suffix, relativeTolerance=relativeTolerance, absoluteTolerance=absoluteTolerance, softStringCompare=softStringCompare)
+        check_results(resultObj, resultStr, prefix = '', suffix=suffix, relativeTolerance=relativeTolerance, absoluteTolerance=absoluteTolerance, softStringCompare=softStringCompare, interactiveTesting=interactiveTesting)
     #this option allows pytest to call the function
     if not allowOverwrite:
         #this assert statement is required for the pytest module
         assert check_results(resultObj, resultStr, prefix = '', suffix=suffix, allowOverwrite = False,
-                             relativeTolerance=relativeTolerance, absoluteTolerance=absoluteTolerance, softStringCompare=softStringCompare) == True #This line is still part of assert.
+                             relativeTolerance=relativeTolerance, absoluteTolerance=absoluteTolerance, softStringCompare=softStringCompare, interactiveTesting=False) == True #This line is still part of assert.
 
 def runTestsInSubdirectories():
     listOfDirectoriesAndFiles = os.listdir(".")
@@ -255,7 +258,7 @@ def runTestsInSubdirectories():
         for name in listOfFilesInDirectory:
             if "test_" in name:
                 print('\n'+ name)
-                os.system("python " + name)
+                os.system(sys.executable + " " + name) #sys.executable + name is like typing "python test_1.py"; important for virtual environments and different systems
         os.chdir("..")
@@ -268,7 +271,7 @@ def runAllTests():
     for name in filesInDirectory:
         if "test_"in name:
             print('\n'+ name)
-            os.system("python " + name)
+            os.system(sys.executable + " " + name) #sys.executable + name is like typing "python test_1.py"; important for virtual environments and different systems
 
     runTestsInSubdirectories()
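Note: the restructured check_results separates two concerns: printing mismatched strings (gated on interactiveTesting plus an object mismatch) and offering to overwrite stored results (gated on both allowOverwrite and interactiveTesting, so pytest runs never block on input()). A condensed sketch of the new gating, with the file I/O stripped out; this mirrors the control flow added above rather than quoting the module's literal code.

```python
# Condensed sketch of the new gating in check_results (file I/O omitted).
# Not the module's literal code; it only mirrors the control flow added above.
def decide_actions(objectMatch, stringMatch, allowOverwrite, interactiveTesting):
    may_print_strings = (not stringMatch) and interactiveTesting and (not objectMatch)
    may_prompt_overwrite = (not objectMatch) and allowOverwrite and interactiveTesting
    return may_print_strings, may_prompt_overwrite

# pytest path: doTest(..., allowOverwrite=False) calls check_results with
# interactiveTesting=False, so neither input() prompt can fire.
assert decide_actions(False, False, True, False) == (False, False)
```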
diff --git a/UnitTesterSG/pytestDriver.py b/UnitTesterSG/pytestDriver.py
index 2e9c476..728d136 100644
--- a/UnitTesterSG/pytestDriver.py
+++ b/UnitTesterSG/pytestDriver.py
@@ -1,4 +1,5 @@
 import os
+import sys
 
 def runAllTests():
     #in Python, the listdir command returns files and directories (like typeing in "dir").
@@ -15,8 +16,9 @@ def runAllTests():
             print("Changing directory to "+directory)
             os.chdir(directory)
             try:
-                os.system("del __pycache__ /Q")
+                os.system("del __pycache__ /Q") #for windows
+                os.system("rm -rf __pycache__") #for linux
             except:
                 pass
-            os.system("pytest")
+            os.system(sys.executable + " -m pytest") #like typing "python -m pytest", but uses whichever python is currently running; important for virtual environments and different systems. See https://stackoverflow.com/questions/8338854/how-to-run-py-test-against-different-versions-of-python
             os.chdir("..")
\ No newline at end of file
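Note: two portability points on the pytestDriver change. The original added line used "rm __pycache__ /Q", which is Windows flag syntax and fails on Linux, so it is corrected to "rm -rf __pycache__" above. As an alternative sketch (not part of this patch), shutil.rmtree replaces the platform-specific del/rm pair entirely, and subprocess.run avoids the shell-quoting pitfalls of os.system when sys.executable contains spaces.

```python
# Alternative sketch (not part of this patch): cross-platform cache cleanup
# and pytest invocation. shutil.rmtree replaces the del/rm pair, and
# subprocess.run handles interpreter paths with spaces that break os.system.
import shutil
import subprocess
import sys

shutil.rmtree("__pycache__", ignore_errors=True)  # no-op if the directory is absent
subprocess.run([sys.executable, "-m", "pytest"], check=False)  # same as "python -m pytest"
```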
""" if __name__ == "__main__": #This is the normal way of using the UnitTesterSG module, and will be run by UnitTesterSG or by running this test file by itself. - ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, relativeTolerance=relativeTolerance, absoluteTolerance=absoluteTolerance) + ut.doTest(resultObj, resultStr, prefix=prefix,suffix=suffix, allowOverwrite = True, relativeTolerance=relativeTolerance, absoluteTolerance=absoluteTolerance, interactiveTesting=True)