10
10
logger = logging .getLogger (__name__ )
11
11
#run using python -m pytest from the root folder
12
12
13
- test_results = []
13
@pytest.fixture(scope="session")
def test_results():
    """Session-wide accumulator for per-test result records.

    Created once per pytest session; tests append their result dicts to
    this shared list so the end-of-session report can dump them all.
    """
    collected: list = []
    return collected
16
+
17
@pytest.fixture(scope="session", autouse=True)
def write_report(test_results, request):
    """Write the collected test results to a JSON report at session end.

    Autouse, session-scoped: runs once per session without being requested
    explicitly. A finalizer is registered (rather than the work being done
    inline) so the report is emitted during session teardown, after every
    test has had a chance to append to ``test_results``.
    """
    def _dump_report():
        # Same payload shape as always: {"results": [...]} pretty-printed.
        with open('test_results_report.json', 'w') as report_file:
            json.dump({"results": test_results}, report_file, indent=4)

    request.addfinalizer(_dump_report)
14
23
15
24
16
25
def signal_helper (signal ):
@@ -61,8 +70,7 @@ def data_ivim_fit_saved():
61
70
62
71
63
72
@pytest .mark .parametrize ("name, bvals, data, algorithm, xfail, kwargs, tolerances" , data_ivim_fit_saved ())
64
- def test_ivim_fit_saved (name , bvals , data , algorithm , xfail , kwargs , tolerances , request ):
65
- global test_results
73
+ def test_ivim_fit_saved (name , bvals , data , algorithm , xfail , kwargs , tolerances , request , test_results ):
66
74
if xfail ["xfail" ]:
67
75
mark = pytest .mark .xfail (reason = "xfail" , strict = xfail ["strict" ])
68
76
request .node .add_marker (mark )
@@ -88,9 +96,7 @@ def to_list_if_needed(value):
88
96
test_result ['status' ] = "XFAILED"
89
97
90
98
test_results .append (test_result )
91
- with open ('test_results_report.json' , 'w' ) as f :
92
- json .dump ({"results" : test_results , "rtol" : tolerances ["rtol" ],
93
- "atol" : tolerances ["atol" ], }, f , indent = 4 )
99
+
94
100
npt .assert_allclose (data ['f' ], f_fit , rtol = tolerances ["rtol" ]["f" ], atol = tolerances ["atol" ]["f" ])
95
101
npt .assert_allclose (data ['D' ], D_fit , rtol = tolerances ["rtol" ]["D" ], atol = tolerances ["atol" ]["D" ])
96
102
npt .assert_allclose (data ['Dp' ], Dp_fit , rtol = tolerances ["rtol" ]["Dp" ], atol = tolerances ["atol" ]["Dp" ])
0 commit comments