swap actual and desired in test #105
base: main
Changes from 3 commits
@@ -5,6 +5,8 @@
 __pycache__/
 *.nii.gz
 *.nii
+*.bval
+*.bvec
 *.dcm
 *.mat
 *.raw
@@ -0,0 +1,73 @@
{
    "Gray matter": {
        "noise": 0.0005,
        "D": 0.81e-3,
        "f": 0.044,
        "Dp": 84e-3,
        "data": [
            1.00021099,
            0.9668462,
            0.94870507,
            0.93594347,
            0.92702605,
            0.89596427,
            0.8748837,
            0.85339213,
            0.83335988,
            0.81319343,
            0.74911377,
            0.69206656,
            0.63748683,
            0.58790883,
            0.54206431,
            0.50051437,
            0.46152624
        ]
    },
    "White matter": {
        "noise": 0.0005,
        "D": 0.86e-3,
        "f": 0.033,
        "Dp": 76e-3,
        "data": [
            0.99968445,
            0.97478753,
            0.95818188,
            0.94546804,
            0.93605795,
            0.90321907,
            0.88051331,
            0.85720661,
            0.83480693,
            0.81373184,
            0.74707338,
            0.6854536,
            0.62867824,
            0.57723924,
            0.53090183,
            0.48602603,
            0.44600491
        ]
    },
    "config": {
        "bvalues": [
            0.0,
            10.0,
            20.0,
            30.0,
            40.0,
            80.0,
            110.0,
            140.0,
            170.0,
            200.0,
            300.0,
            400.0,
            500.0,
            600.0,
            700.0,
            800.0,
            900.0
        ]
    }
}
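The D, f, and Dp entries together with the bvalues in config appear consistent with the standard bi-exponential IVIM signal model. A minimal sketch of how such a curve can be recomputed and compared against the stored data (the file path here is illustrative, not taken from the PR):

```python
import json
import numpy as np

def ivim_signal(b, D, f, Dp):
    """Standard bi-exponential IVIM model, normalized to S(b=0) = 1."""
    return f * np.exp(-b * Dp) + (1 - f) * np.exp(-b * D)

# Illustrative path; point this at the JSON file added in this PR.
with open("gray_white_matter.json") as fh:
    sim = json.load(fh)

bvals = np.array(sim["config"]["bvalues"])
gm = sim["Gray matter"]
model = ivim_signal(bvals, gm["D"], gm["f"], gm["Dp"])

# The stored "data" should track the model to within the stated noise level (~0.0005).
print(np.max(np.abs(model - np.array(gm["data"]))))
```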
@@ -59,15 +59,16 @@ def data_ivim_fit_saved():
            first = False
        yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime

-@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved())
Review comment: Why un-parameterize this? It seems to be doing the right thing, but I'm honestly surprised it's working. Perhaps this was somewhat unnecessary, but I'm not even really following how it's getting the data. For example, …

Reply: The function previously used for parameterization (data_ivim_saved()) was hardcoded to load data from generic.json. To make use of --dataFile in the call to pytest I had to switch to parameterization by pytest_generate_tests, but an alternative would have been to change data_ivim_saved() to also loop over json files. I felt that the former would be less work, but I am open to other solutions.

Reply: Ah ok, took me a while but I see now. I do like this approach more, it seems to be more how pytest should work. My concern is that your addition doesn't cover everything the previous function, …
-def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances,skiptime, request, record_property):
-    if xfail["xfail"]:
-        mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"])
-        request.node.add_marker(mark)
+#@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved())
+#def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances,skiptime, request, record_property):
+def test_ivim_fit_saved(ivim_algorithm, ivim_data, record_property):
+    algorithm, options = ivim_algorithm
+    name, bvals, data = ivim_data

     signal = signal_helper(data["data"])
-    tolerances = tolerances_helper(tolerances, data)
+    tolerances = tolerances_helper(options.get("tolerances", {}), data)
     start_time = time.time()  # Record the start time
-    fit = OsipiBase(algorithm=algorithm, **kwargs)
+    fit = OsipiBase(algorithm=algorithm)
     fit_result = fit.osipi_fit(signal, bvals)
     elapsed_time = time.time() - start_time  # Calculate elapsed time
     def to_list_if_needed(value):
@@ -85,14 +86,15 @@ def to_list_if_needed(value):
        "atol": tolerances["atol"]
    }
    record_property('test_data', test_result)
-    npt.assert_allclose(fit_result['f'],data['f'], rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
+    npt.assert_allclose(data['f'], fit_result['f'], rtol=tolerances["rtol"]["f"], atol=tolerances["atol"]["f"])
     if data['f']<0.80: # we need some signal for D to be detected
-        npt.assert_allclose(fit_result['D'],data['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
+        npt.assert_allclose(data['D'], fit_result['D'], rtol=tolerances["rtol"]["D"], atol=tolerances["atol"]["D"])
     if data['f']>0.03: #we need some f for D* to be interpretable
-        npt.assert_allclose(fit_result['Dp'],data['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
+        npt.assert_allclose(data['Dp'], fit_result['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
     #assert fit_result['D'] < fit_result['Dp'], f"D {fit_result['D']} is larger than D* {fit_result['Dp']} for {name}"
+    skiptime = False
     if not skiptime:
-        assert elapsed_time < 0.5, f"Algorithm {name} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel
+        assert elapsed_time < 0.5, f"Algorithm {algorithm} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel


 def algorithms():
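For context on the argument swap above (and the PR title): numpy.testing.assert_allclose(actual, desired, ...) checks |actual - desired| <= atol + rtol * |desired|, so the relative tolerance is scaled by the second argument, and the failure message labels the two values as ACTUAL and DESIRED. Swapping the arguments therefore changes which value anchors rtol and how mismatches are reported. A small illustration with made-up numbers (not values from this test):

```python
import numpy.testing as npt

reference = 0.04  # ground-truth value, e.g. f from the JSON (illustrative)
estimate = 0.05   # hypothetical fitted value

# rtol is applied to the second argument ("desired"), so order matters:
npt.assert_allclose(reference, estimate, rtol=0.22)      # passes: 0.01 <= 0.22 * 0.05
try:
    npt.assert_allclose(estimate, reference, rtol=0.22)  # fails:  0.01 >  0.22 * 0.04
except AssertionError:
    print("swapped order fails: rtol is scaled by the second ('desired') argument")
```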
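The review thread earlier in this diff mentions switching to parameterization via pytest_generate_tests driven by a --dataFile pytest option; neither is shown in this excerpt. A minimal sketch of how a conftest.py could supply the ivim_data fixture used by the new test signature (everything except the names ivim_data, pytest_generate_tests, and --dataFile is an assumption, not the PR's actual implementation):

```python
# conftest.py (sketch, assuming the JSON layout of the data file added above)
import json
import pathlib

def pytest_addoption(parser):
    # Hypothetical registration of the --dataFile option mentioned in the review thread.
    parser.addoption("--dataFile", action="store", default="generic.json",
                     help="JSON file with signals and ground-truth IVIM parameters")

def pytest_generate_tests(metafunc):
    # Parameterize any test requesting ivim_data with one (name, bvals, data)
    # tuple per region defined in the chosen JSON file.
    if "ivim_data" in metafunc.fixturenames:
        path = pathlib.Path(metafunc.config.getoption("dataFile"))
        contents = json.loads(path.read_text())
        bvals = contents.pop("config")["bvalues"]
        cases = [(name, bvals, data) for name, data in contents.items()]
        metafunc.parametrize("ivim_data", cases, ids=[c[0] for c in cases])
```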
Review comment: This is fine for me; but all of the osipi code is expecting the download to be in the defined folder destination. For what purpose do you want to put it elsewhere?

Reply: Indeed. I just wanted to add some flexibility in case you want to place the data somewhere else and use it for other purposes.
Review comment: I think the test-results.xml should be a different file so it doesn't get overwritten. Perhaps also the cov, but maybe that's ok?

Reply: Indeed. I have now changed the file naming of the test report. Not sure if the cov report is used, but if it is, it should be the same regardless of data, i.e. no problem that it is overwritten?

Reply: Looks like cov-report can be extended with :DEST, so perhaps a different destination file for that 2nd test for that piece as well? https://pytest-cov.readthedocs.io/en/latest/config.html
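A sketch of how two runs could write to separate report files using pytest's --junitxml and the pytest-cov :DEST syntax linked above; the file names and data-file paths are illustrative, not taken from this PR:

```sh
# First run against the default data set
pytest --dataFile generic.json \
       --junitxml=test-results-generic.xml \
       --cov --cov-report=xml:coverage-generic.xml

# Second run against the new data file, writing to separate report files
pytest --dataFile gray_white_matter.json \
       --junitxml=test-results-gray-white.xml \
       --cov --cov-report=xml:coverage-gray-white.xml
```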