Matlab installation + adding Amita's code to wrapper #103
File: conftest.py
@@ -4,6 +4,7 @@
import csv
# import datetime


def pytest_addoption(parser):
    parser.addoption(
        "--SNR",

@@ -87,15 +88,17 @@ def pytest_addoption(parser):
        help="Run MATLAB-dependent tests"
    )

eng = None

def pytest_configure(config):
    global eng
    if config.getoption("--withmatlab"):
        import matlab.engine
        print("Starting MATLAB engine...")
        eng = matlab.engine.start_matlab()
        print("MATLAB engine started.")
@pytest.fixture(scope="session")
def eng(request):
    """Start and return a MATLAB engine session if --withmatlab is set."""
    if not request.config.getoption("--withmatlab"):
        return None
    import matlab.engine
    print("Starting MATLAB engine...")
    eng = matlab.engine.start_matlab()
    print("MATLAB engine started.")
    return eng


@pytest.fixture(scope="session")

@@ -164,37 +167,102 @@ def use_prior(request):
def pytest_generate_tests(metafunc):
    if "SNR" in metafunc.fixturenames:
        metafunc.parametrize("SNR", metafunc.config.getoption("SNR"))
    if "ivim_algorithm" in metafunc.fixturenames:
        algorithms = algorithm_list(metafunc.config.getoption("algorithmFile"), metafunc.config.getoption("selectAlgorithm"), metafunc.config.getoption("dropAlgorithm"))
        metafunc.parametrize("ivim_algorithm", algorithms)
    if "ivim_data" in metafunc.fixturenames:
        data = data_list(metafunc.config.getoption("dataFile"))
        metafunc.parametrize("ivim_data", data)
    if "data_ivim_fit_saved" in metafunc.fixturenames:
        args = data_ivim_fit_saved(metafunc.config.getoption("dataFile"), metafunc.config.getoption("algorithmFile"))
        metafunc.parametrize("data_ivim_fit_saved", args)
    if "algorithmlist" in metafunc.fixturenames:
        args = algorithmlist(metafunc.config.getoption("algorithmFile"))
        metafunc.parametrize("algorithmlist", args)
    if "bound_input" in metafunc.fixturenames:
        args = bound_input(metafunc.config.getoption("dataFile"), metafunc.config.getoption("algorithmFile"))
        metafunc.parametrize("bound_input", args)


def data_list(filename):
    current_folder = pathlib.Path.cwd()
    data_path = current_folder / filename
    with data_path.open() as f:
        all_data = json.load(f)

    bvals = all_data.pop('config')
    bvals = bvals['bvalues']
    for name, data in all_data.items():
        yield name, bvals, data


def data_ivim_fit_saved(datafile, algorithmFile):
    # Find the algorithms from algorithms.json
    current_folder = pathlib.Path.cwd()
    algorithm_path = current_folder / algorithmFile
    with algorithm_path.open() as f:
        algorithm_information = json.load(f)
    # Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
    generic = current_folder / datafile
    with generic.open() as f:
        all_data = json.load(f)
    algorithms = algorithm_information["algorithms"]
    bvals = all_data.pop('config')
    bvals = bvals['bvalues']
    for algorithm in algorithms:
        first = True
        for name, data in all_data.items():
            algorithm_dict = algorithm_information.get(algorithm, {})
            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
            kwargs = algorithm_dict.get("options", {})
            tolerances = algorithm_dict.get("tolerances", {})
            skiptime = False
            if first == True:
                if algorithm_dict.get("fail_first_time", {}) == True:
                    skiptime = True
                first = False
            if algorithm_dict.get("requires_matlab", False) == True:
Review comment: No need to …
Reply: Yes, similar to the previous one. Found this in several locations; fixed it now.
                requires_matlab = True
            else:
                requires_matlab = False
            yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab

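For reference, `data_ivim_fit_saved` (and the generators below) expect the parsed algorithms.json to hold a top-level "algorithms" list plus an optional per-algorithm entry carrying the keys read above. A hedged sketch of that structure as the dict `json.load` returns; the algorithm names and values here are made up for illustration:

```python
algorithm_information = {
    "algorithms": ["ExampleAlg_python", "ExampleAlg_MATLAB"],
    "ExampleAlg_MATLAB": {
        "requires_matlab": True,                 # run only when the MATLAB engine fixture is available
        "fail_first_time": True,                 # skip the timing assert on this algorithm's first fit
        "xfail_names": {"noisy_voxel": False},   # data name -> strict flag for expected failures
        "options": {"thresholds": [200]},        # extra kwargs forwarded to OsipiBase
        "tolerances": {"rtol": {"f": 0.1, "D": 0.1, "Dp": 0.3}},
    },
}
```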
def algorithm_list(filename, selected, dropped):
def algorithmlist(algorithmFile):
    # Find the algorithms from algorithms.json
    current_folder = pathlib.Path.cwd()
    algorithm_path = current_folder / filename
    algorithm_path = current_folder / algorithmFile
    with algorithm_path.open() as f:
        algorithm_information = json.load(f)
    algorithms = set(algorithm_information["algorithms"])

    algorithms = algorithm_information["algorithms"]
    for algorithm in algorithms:
        algorithm_dict = algorithm_information.get(algorithm, {})
        if algorithm_dict.get("requieres_matlab", {}) == True:
            if eng is None:
                algorithms = algorithms - set(algorithm)
    algorithms = algorithms - set(dropped)
    if len(selected) > 0 and selected[0]:
        algorithms = algorithms & set(selected)
    return list(algorithms)
        if algorithm_dict.get("requires_matlab", False) == True:
Review comment: No need for …
Reply: Fixed at multiple locations.
            requires_matlab = True
        else:
            requires_matlab = False
        yield algorithm, requires_matlab


def data_list(filename):
def bound_input(datafile, algorithmFile):
    # Find the algorithms from algorithms.json
    current_folder = pathlib.Path.cwd()
    data_path = current_folder / filename
    with data_path.open() as f:
    algorithm_path = current_folder / algorithmFile
    with algorithm_path.open() as f:
        algorithm_information = json.load(f)
    # Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
    generic = current_folder / datafile
    with generic.open() as f:
        all_data = json.load(f)

    algorithms = algorithm_information["algorithms"]
    bvals = all_data.pop('config')
    bvals = bvals['bvalues']
    for name, data in all_data.items():
        yield name, bvals, data
        for algorithm in algorithms:
            algorithm_dict = algorithm_information.get(algorithm, {})
            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
            kwargs = algorithm_dict.get("options", {})
            tolerances = algorithm_dict.get("tolerances", {})
            if algorithm_dict.get("requires_matlab", False) == True:
Review comment: Similarly …
Reply: Fixed.
                requires_matlab = True
            else:
                requires_matlab = False
            yield name, bvals, data, algorithm, xfail, kwargs, tolerances, requires_matlab
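The conftest changes above swap the module-level `eng` global (set in `pytest_configure`) for a session-scoped fixture gated by `--withmatlab`. A minimal, self-contained sketch of that pattern for a conftest.py; the option and fixture names mirror the diff, while the rest (including the finalizer) is illustrative only:

```python
import pytest


def pytest_addoption(parser):
    # Opt-in flag: MATLAB-dependent tests only run when this is passed.
    parser.addoption(
        "--withmatlab",
        action="store_true",
        default=False,
        help="Run MATLAB-dependent tests",
    )


@pytest.fixture(scope="session")
def eng(request):
    """Start one MATLAB engine for the whole session, or return None."""
    if not request.config.getoption("--withmatlab"):
        return None
    import matlab.engine  # imported lazily so the dependency stays optional
    engine = matlab.engine.start_matlab()
    request.addfinalizer(engine.quit)  # shut the engine down after the last test
    return engine
```

Because the fixture is session-scoped, the engine starts at most once per pytest run, however many tests request `eng`.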
File: (IVIM fit unit tests)
@@ -5,7 +5,6 @@
import pathlib
import time
from src.wrappers.OsipiBase import OsipiBase
from conftest import eng
#run using python -m pytest from the root folder


@@ -31,50 +30,24 @@ def tolerances_helper(tolerances, data):
        tolerances["atol"] = tolerances.get("atol", {"f": 2e-1, "D": 5e-4, "Dp": 4e-2})
    return tolerances

def data_ivim_fit_saved():
    # Find the algorithms from algorithms.json
    file = pathlib.Path(__file__)
    algorithm_path = file.with_name('algorithms.json')
    with algorithm_path.open() as f:
        algorithm_information = json.load(f)

    # Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
    generic = file.with_name('generic.json')
    with generic.open() as f:
        all_data = json.load(f)

    algorithms = algorithm_information["algorithms"]
    bvals = all_data.pop('config')
    bvals = bvals['bvalues']
    for algorithm in algorithms:
        first = True
        for name, data in all_data.items():
            algorithm_dict = algorithm_information.get(algorithm, {})
            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
            kwargs = algorithm_dict.get("options", {})
            tolerances = algorithm_dict.get("tolerances", {})
            skiptime = False
            if first == True:
                if algorithm_dict.get("fail_first_time", {}) == True:
                    skiptime = True
                first = False
            if algorithm_dict.get("requieres_matlab", {}) == True:
                if eng is None:
                    continue
                else:
                    kwargs = {**kwargs, 'eng': eng}
            yield name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime

@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime", data_ivim_fit_saved())
def test_ivim_fit_saved(name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, request, record_property):
def test_ivim_fit_saved(data_ivim_fit_saved, eng, request, record_property):
    name, bvals, data, algorithm, xfail, kwargs, tolerances, skiptime, requires_matlab = data_ivim_fit_saved
    max_time = 0.5
    if requires_matlab:
        max_time = 2
        if eng is None:
            print('test is here')
Review comment: Print still needed?
Reply: Fixed.
            pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
        else:
            print('test is not here')
Review comment: And the print here?
Reply: Fixed.
            kwargs = {**kwargs, 'eng': eng}
    if xfail["xfail"]:
        mark = pytest.mark.xfail(reason="xfail", strict=xfail["strict"])
        request.node.add_marker(mark)
    signal = signal_helper(data["data"])
    tolerances = tolerances_helper(tolerances, data)
    start_time = time.time()  # Record the start time
    fit = OsipiBase(algorithm=algorithm, **kwargs)
    start_time = time.time()  # Record the start time
    fit_result = fit.osipi_fit(signal, bvals)
    elapsed_time = time.time() - start_time  # Calculate elapsed time
    def to_list_if_needed(value):

@@ -99,29 +72,18 @@ def to_list_if_needed(value):
    npt.assert_allclose(fit_result['Dp'], data['Dp'], rtol=tolerances["rtol"]["Dp"], atol=tolerances["atol"]["Dp"])
    #assert fit_result['D'] < fit_result['Dp'], f"D {fit_result['D']} is larger than D* {fit_result['Dp']} for {name}"
    if not skiptime:
        assert elapsed_time < 0.5, f"Algorithm {name} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel


def algorithmlist():
    # Find the algorithms from algorithms.json
    file = pathlib.Path(__file__)
    algorithm_path = file.with_name('algorithms.json')
    with algorithm_path.open() as f:
        algorithm_information = json.load(f)
    algorithms = algorithm_information["algorithms"]
    for algorithm in algorithms:
        algorithm_dict = algorithm_information.get(algorithm, {})
        args = {}
        if algorithm_dict.get("requieres_matlab", {}) == True:
            if eng is None:
                continue
            else:
                args = {**args, 'eng': eng}
        yield algorithm, args

@pytest.mark.parametrize("algorithm, args", algorithmlist())
def test_default_bounds_and_initial_guesses(algorithm, args):
    fit = OsipiBase(algorithm=algorithm, **args)
        assert elapsed_time < max_time, f"Algorithm {name} took {elapsed_time} seconds, which is longer than 2 second to fit per voxel" #less than 0.5 seconds per voxel

def test_default_bounds_and_initial_guesses(algorithmlist, eng):
    algorithm, requires_matlab = algorithmlist
    if requires_matlab:
        if eng is None:
            pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
        else:
            kwargs = {'eng': eng}
    else:
        kwargs = {}
    fit = OsipiBase(algorithm=algorithm, **kwargs)
    #assert fit.bounds is not None, f"For {algorithm}, there is no default fit boundary"
    #assert fit.initial_guess is not None, f"For {algorithm}, there is no default fit initial guess"
    if fit.use_bounds:

@@ -141,38 +103,13 @@ def test_default_bounds_and_initial_guesses(algorithm, args):
        assert 0.9 <= fit.initial_guess[3] <= 1.1, f"For {algorithm}, the default initial guess for S {fit.initial_guess[3]} is unrealistic; note signal is normalized"


def bound_input():
    # Find the algorithms from algorithms.json
    file = pathlib.Path(__file__)
    algorithm_path = file.with_name('algorithms.json')
    with algorithm_path.open() as f:
        algorithm_information = json.load(f)

    # Load generic test data generated from the included phantom: phantoms/MR_XCAT_qMRI
    generic = file.with_name('generic.json')
    with generic.open() as f:
        all_data = json.load(f)

    algorithms = algorithm_information["algorithms"]
    bvals = all_data.pop('config')
    bvals = bvals['bvalues']
    for name, data in all_data.items():
        for algorithm in algorithms:
            algorithm_dict = algorithm_information.get(algorithm, {})
            xfail = {"xfail": name in algorithm_dict.get("xfail_names", {}),
                     "strict": algorithm_dict.get("xfail_names", {}).get(name, True)}
            kwargs = algorithm_dict.get("options", {})
            tolerances = algorithm_dict.get("tolerances", {})
            if algorithm_dict.get("requieres_matlab", {}) == True:
                if eng is None:
                    continue
                else:
                    kwargs = {**kwargs, 'eng': eng}
            yield name, bvals, data, algorithm, xfail, kwargs, tolerances


@pytest.mark.parametrize("name, bvals, data, algorithm, xfail, kwargs, tolerances", bound_input())
def test_bounds(name, bvals, data, algorithm, xfail, kwargs, tolerances, request):
def test_bounds(bound_input, eng):
    name, bvals, data, algorithm, xfail, kwargs, tolerances, requires_matlab = bound_input
    if requires_matlab:
        if eng is None:
            pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
        else:
            kwargs = {**kwargs, 'eng': eng}
    bounds = ([0.0008, 0.2, 0.01, 1.1], [0.0012, 0.3, 0.02, 1.3])
    # deliberately have silly bounds to see whether they are used
    fit = OsipiBase(algorithm=algorithm, bounds=bounds, initial_guess=[0.001, 0.25, 0.015, 1.2], **kwargs)
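The rewritten tests above (`test_ivim_fit_saved`, `test_default_bounds_and_initial_guesses`, `test_bounds`) all repeat the same guard: skip when the algorithm needs MATLAB but no engine is running, otherwise pass the engine through the fit kwargs. A hedged sketch of how that guard could be factored out; the helper name `matlab_kwargs` is hypothetical, not part of this PR:

```python
import pytest


def matlab_kwargs(requires_matlab, eng, base_kwargs=None):
    """Skip MATLAB-only tests when no engine is available, else add it to the fit kwargs."""
    kwargs = dict(base_kwargs or {})
    if requires_matlab:
        if eng is None:
            pytest.skip("Running without MATLAB; if MATLAB is available please run pytest --withmatlab")
        kwargs["eng"] = eng
    return kwargs


# Usage inside a test, mirroring test_bounds:
#   kwargs = matlab_kwargs(requires_matlab, eng, kwargs)
#   fit = OsipiBase(algorithm=algorithm, bounds=bounds, **kwargs)
```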
File: tests/IVIMmodels/unit_tests/test_ivim_synthetic.py
@@ -14,8 +14,12 @@
#run using pytest <path_to_this_file> --saveFileName test_output.txt --SNR 50 100 200
#e.g. pytest -m slow tests/IVIMmodels/unit_tests/test_ivim_synthetic.py --saveFileName test_output.csv --SNR 10 50 100 200 --fitCount 20
@pytest.mark.slow
def test_generated(ivim_algorithm, ivim_data, SNR, rtol, atol, fit_count, rician_noise, save_file, save_duration_file, use_prior):
def test_generated(algorithmlist, skip_list, ivim_data, SNR, rtol, atol, fit_count, rician_noise, save_file, save_duration_file, use_prior):
    # assert save_file == "test"
    ivim_algorithm, requires_matlab = algorithmlist
    if requires_matlab:
Review comment: Can be simplified to …
Reply: Thanks!
        if eng is None:
            pytest.skip(reason="Running without matlab; if Matlab is available please run pytest --withmatlab")
    rng = np.random.RandomState(42)
    # random.seed(42)
    S0 = 1
Review comment: Here's another case of comparing an empty dict to True.
Reply: Fixed.
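The pattern referred to above is `algorithm_dict.get("requieres_matlab", {}) == True` in the old code: it only behaves because an empty dict happens to compare unequal to True, and the explicit `== True` becomes unnecessary once the default is a real boolean. A short illustrative snippet (values made up):

```python
algorithm_dict = {}  # algorithm entry with no "requires_matlab" key

# Old pattern: the default is an empty dict, so the result hinges on {} == True.
old_flag = algorithm_dict.get("requires_matlab", {}) == True
print(old_flag)  # False, but only because {} != True

# Clearer: default to False and use the value directly, no comparison needed.
requires_matlab = algorithm_dict.get("requires_matlab", False)
print(requires_matlab)  # False
```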