From 6de026bd8fe340483231e7d7628ba901442a1d06 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 26 Mar 2025 20:28:08 +0000 Subject: [PATCH 01/27] initial commit --- mne/io/curry/curry.py | 692 +++++++----------------------------- mne/io/curry/curryreader.py | 629 ++++++++++++++++++++++++++++++++ 2 files changed, 762 insertions(+), 559 deletions(-) create mode 100644 mne/io/curry/curryreader.py diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 3e8347fba0d..dda983e5a51 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -3,539 +3,19 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. -import os.path as op import re -from collections import namedtuple -from datetime import datetime, timezone from pathlib import Path import numpy as np -from ..._fiff._digitization import _make_dig_points -from ..._fiff.constants import FIFF from ..._fiff.meas_info import create_info -from ..._fiff.tag import _coil_trans_to_loc -from ..._fiff.utils import _mult_cal_one, _read_segments_file -from ...annotations import Annotations -from ...surface import _normal_orth -from ...transforms import ( - Transform, - _angle_between_quats, - apply_trans, - combine_transforms, - get_ras_to_neuromag_trans, - invert_transform, - rot_to_quat, -) -from ...utils import _check_fname, check_fname, logger, verbose +from ...annotations import annotations_from_events +from ...channels import make_dig_montage +from ...utils import verbose from ..base import BaseRaw -from ..ctf.trans import _quaternion_align - -FILE_EXTENSIONS = { - "Curry 7": { - "info": ".dap", - "data": ".dat", - "labels": ".rs3", - "events_cef": ".cef", - "events_ceo": ".ceo", - "hpi": ".hpi", - }, - "Curry 8": { - "info": ".cdt.dpa", - "data": ".cdt", - "labels": ".cdt.dpa", - "events_cef": ".cdt.cef", - "events_ceo": ".cdt.ceo", - "hpi": ".cdt.hpi", - }, -} -CHANTYPES = {"meg": "_MAG1", "eeg": "", "misc": "_OTHERS"} -FIFFV_CHANTYPES = { - "meg": FIFF.FIFFV_MEG_CH, - "eeg": FIFF.FIFFV_EEG_CH, - "misc": FIFF.FIFFV_MISC_CH, -} -FIFFV_COILTYPES = { - "meg": FIFF.FIFFV_COIL_CTF_GRAD, - "eeg": FIFF.FIFFV_COIL_EEG, - "misc": FIFF.FIFFV_COIL_NONE, -} -SI_UNITS = dict(V=FIFF.FIFF_UNIT_V, T=FIFF.FIFF_UNIT_T) -SI_UNIT_SCALE = dict(c=1e-2, m=1e-3, u=1e-6, µ=1e-6, n=1e-9, p=1e-12, f=1e-15) - -CurryParameters = namedtuple( - "CurryParameters", - "n_samples, sfreq, is_ascii, unit_dict, n_chans, dt_start, chanidx_in_file", -) - - -def _get_curry_version(file_extension): - """Check out the curry file version.""" - return "Curry 8" if "cdt" in file_extension else "Curry 7" - - -def _get_curry_file_structure(fname, required=()): - """Store paths to a dict and check for required files.""" - _msg = ( - "The following required files cannot be found: {0}.\nPlease make " - "sure all required files are located in the same directory as {1}." 
- ) - fname = Path(_check_fname(fname, "read", True, "fname")) - - # we don't use os.path.splitext to also handle extensions like .cdt.dpa - # this won't handle a dot in the filename, but it should handle it in - # the parent directories - fname_base = fname.name.split(".", maxsplit=1)[0] - ext = fname.name[len(fname_base) :] - fname_base = str(fname) - fname_base = fname_base[: len(fname_base) - len(ext)] - del fname - version = _get_curry_version(ext) - my_curry = dict() - for key in ("info", "data", "labels", "events_cef", "events_ceo", "hpi"): - fname = fname_base + FILE_EXTENSIONS[version][key] - if op.isfile(fname): - _key = "events" if key.startswith("events") else key - my_curry[_key] = fname - - missing = [field for field in required if field not in my_curry] - if missing: - raise FileNotFoundError(_msg.format(np.unique(missing), fname)) - - return my_curry - - -def _read_curry_lines(fname, regex_list): - """Read through the lines of a curry parameter files and save data. - - Parameters - ---------- - fname : path-like - Path to a curry file. - regex_list : list of str - A list of strings or regular expressions to search within the file. - Each element `regex` in `regex_list` must be formulated so that - `regex + " START_LIST"` initiates the start and `regex + " END_LIST"` - initiates the end of the elements that should be saved. - - Returns - ------- - data_dict : dict - A dictionary containing the extracted data. For each element `regex` - in `regex_list` a dictionary key `data_dict[regex]` is created, which - contains a list of the according data. - - """ - save_lines = {} - data_dict = {} - - for regex in regex_list: - save_lines[regex] = False - data_dict[regex] = [] - - with open(fname) as fid: - for line in fid: - for regex in regex_list: - if re.match(regex + " END_LIST", line): - save_lines[regex] = False - - if save_lines[regex] and line != "\n": - result = line.replace("\n", "") - if "\t" in result: - result = result.split("\t") - data_dict[regex].append(result) - - if re.match(regex + " START_LIST", line): - save_lines[regex] = True - - return data_dict - - -def _read_curry_parameters(fname): - """Extract Curry params from a Curry info file.""" - _msg_match = ( - "The sampling frequency and the time steps extracted from " - "the parameter file do not match." - ) - _msg_invalid = "sfreq must be greater than 0. 
Got sfreq = {0}" - - var_names = [ - "NumSamples", - "SampleFreqHz", - "DataFormat", - "SampleTimeUsec", - "NumChannels", - "StartYear", - "StartMonth", - "StartDay", - "StartHour", - "StartMin", - "StartSec", - "StartMillisec", - "NUM_SAMPLES", - "SAMPLE_FREQ_HZ", - "DATA_FORMAT", - "SAMPLE_TIME_USEC", - "NUM_CHANNELS", - "START_YEAR", - "START_MONTH", - "START_DAY", - "START_HOUR", - "START_MIN", - "START_SEC", - "START_MILLISEC", - ] - - param_dict = dict() - unit_dict = dict() - - with open(fname) as fid: - for line in iter(fid): - if any(var_name in line for var_name in var_names): - key, val = line.replace(" ", "").replace("\n", "").split("=") - param_dict[key.lower().replace("_", "")] = val - for key, type_ in CHANTYPES.items(): - if f"DEVICE_PARAMETERS{type_} START" in line: - data_unit = next(fid) - unit_dict[key] = ( - data_unit.replace(" ", "").replace("\n", "").split("=")[-1] - ) - - # look for CHAN_IN_FILE sections, which may or may not exist; issue #8391 - types = ["meg", "eeg", "misc"] - chanidx_in_file = _read_curry_lines( - fname, ["CHAN_IN_FILE" + CHANTYPES[key] for key in types] - ) - - n_samples = int(param_dict["numsamples"]) - sfreq = float(param_dict["samplefreqhz"]) - time_step = float(param_dict["sampletimeusec"]) * 1e-6 - is_ascii = param_dict["dataformat"] == "ASCII" - n_channels = int(param_dict["numchannels"]) - try: - dt_start = datetime( - int(param_dict["startyear"]), - int(param_dict["startmonth"]), - int(param_dict["startday"]), - int(param_dict["starthour"]), - int(param_dict["startmin"]), - int(param_dict["startsec"]), - int(param_dict["startmillisec"]) * 1000, - timezone.utc, - ) - # Note that the time zone information is not stored in the Curry info - # file, and it seems the start time info is in the local timezone - # of the acquisition system (which is unknown); therefore, just set - # the timezone to be UTC. If the user knows otherwise, they can - # change it later. (Some Curry files might include StartOffsetUTCMin, - # but its presence is unpredictable, so we won't rely on it.) 
- except (ValueError, KeyError): - dt_start = None # if missing keywords or illegal values, don't set - - if time_step == 0: - true_sfreq = sfreq - elif sfreq == 0: - true_sfreq = 1 / time_step - elif not np.isclose(sfreq, 1 / time_step): - raise ValueError(_msg_match) - else: # they're equal and != 0 - true_sfreq = sfreq - if true_sfreq <= 0: - raise ValueError(_msg_invalid.format(true_sfreq)) - - return CurryParameters( - n_samples, - true_sfreq, - is_ascii, - unit_dict, - n_channels, - dt_start, - chanidx_in_file, - ) - - -def _read_curry_info(curry_paths): - """Extract info from curry parameter files.""" - curry_params = _read_curry_parameters(curry_paths["info"]) - R = np.eye(4) - R[[0, 1], [0, 1]] = -1 # rotate 180 deg - # shift down and back - # (chosen by eyeballing to make the CTF helmet look roughly correct) - R[:3, 3] = [0.0, -0.015, -0.12] - curry_dev_dev_t = Transform("ctf_meg", "meg", R) - - # read labels from label files - label_fname = curry_paths["labels"] - types = ["meg", "eeg", "misc"] - labels = _read_curry_lines( - label_fname, ["LABELS" + CHANTYPES[key] for key in types] - ) - sensors = _read_curry_lines( - label_fname, ["SENSORS" + CHANTYPES[key] for key in types] - ) - normals = _read_curry_lines( - label_fname, ["NORMALS" + CHANTYPES[key] for key in types] - ) - assert len(labels) == len(sensors) == len(normals) - - all_chans = list() - dig_ch_pos = dict() - for key in ["meg", "eeg", "misc"]: - chanidx_is_explicit = ( - len(curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]]) > 0 - ) # channel index - # position in the datafile may or may not be explicitly declared, - # based on the CHAN_IN_FILE section in info file - for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]): - chanidx = len(all_chans) + 1 # by default, just assume the - # channel index in the datafile is in order of the channel - # names as we found them in the labels file - if chanidx_is_explicit: # but, if explicitly declared, use - # that index number - chanidx = int( - curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]][ind] - ) - if chanidx <= 0: # if chanidx was explicitly declared to be ' 0', - # it means the channel is not actually saved in the data file - # (e.g. the "Ref" channel), so don't add it to our list. - # Git issue #8391 - continue - ch = { - "ch_name": chan, - "unit": curry_params.unit_dict[key], - "kind": FIFFV_CHANTYPES[key], - "coil_type": FIFFV_COILTYPES[key], - "ch_idx": chanidx, - } - if key == "eeg": - loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) - # XXX just the sensor, where is ref (next 3)? 
- assert loc.shape == (3,) - loc /= 1000.0 # to meters - loc = np.concatenate([loc, np.zeros(9)]) - ch["loc"] = loc - # XXX need to check/ensure this - ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD - dig_ch_pos[chan] = loc[:3] - elif key == "meg": - pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float) - pos /= 1000.0 # to meters - pos = pos[:3] # just the inner coil - pos = apply_trans(curry_dev_dev_t, pos) - nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float) - assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4) - nn /= np.linalg.norm(nn) - nn = apply_trans(curry_dev_dev_t, nn, move=False) - trans = np.eye(4) - trans[:3, 3] = pos - trans[:3, :3] = _normal_orth(nn).T - ch["loc"] = _coil_trans_to_loc(trans) - ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE - all_chans.append(ch) - dig = _make_dig_points( - dig_ch_pos=dig_ch_pos, coord_frame="head", add_missing_fiducials=True - ) - del dig_ch_pos - - ch_count = len(all_chans) - assert ch_count == curry_params.n_chans # ensure that we have assembled - # the same number of channels as declared in the info (.DAP) file in the - # DATA_PARAMETERS section. Git issue #8391 - - # sort the channels to assure they are in the order that matches how - # recorded in the datafile. In general they most likely are already in - # the correct order, but if the channel index in the data file was - # explicitly declared we might as well use it. - all_chans = sorted(all_chans, key=lambda ch: ch["ch_idx"]) - - ch_names = [chan["ch_name"] for chan in all_chans] - info = create_info(ch_names, curry_params.sfreq) - with info._unlock(): - info["meas_date"] = curry_params.dt_start # for Git issue #8398 - info["dig"] = dig - _make_trans_dig(curry_paths, info, curry_dev_dev_t) - - for ind, ch_dict in enumerate(info["chs"]): - all_chans[ind].pop("ch_idx") - ch_dict.update(all_chans[ind]) - assert ch_dict["loc"].shape == (12,) - ch_dict["unit"] = SI_UNITS[all_chans[ind]["unit"][1]] - ch_dict["cal"] = SI_UNIT_SCALE[all_chans[ind]["unit"][0]] - - return info, curry_params.n_samples, curry_params.is_ascii - - -_card_dict = { - "Left ear": FIFF.FIFFV_POINT_LPA, - "Nasion": FIFF.FIFFV_POINT_NASION, - "Right ear": FIFF.FIFFV_POINT_RPA, -} - - -def _make_trans_dig(curry_paths, info, curry_dev_dev_t): - # Coordinate frame transformations and definitions - no_msg = "Leaving device<->head transform as None" - info["dev_head_t"] = None - label_fname = curry_paths["labels"] - key = "LANDMARKS" + CHANTYPES["meg"] - lm = _read_curry_lines(label_fname, [key])[key] - lm = np.array(lm, float) - lm.shape = (-1, 3) - if len(lm) == 0: - # no dig - logger.info(no_msg + " (no landmarks found)") - return - lm /= 1000.0 - key = "LM_REMARKS" + CHANTYPES["meg"] - remarks = _read_curry_lines(label_fname, [key])[key] - assert len(remarks) == len(lm) - with info._unlock(): - info["dig"] = list() - cards = dict() - for remark, r in zip(remarks, lm): - kind = ident = None - if remark in _card_dict: - kind = FIFF.FIFFV_POINT_CARDINAL - ident = _card_dict[remark] - cards[ident] = r - elif remark.startswith("HPI"): - kind = FIFF.FIFFV_POINT_HPI - ident = int(remark[3:]) - 1 - if kind is not None: - info["dig"].append( - dict(kind=kind, ident=ident, r=r, coord_frame=FIFF.FIFFV_COORD_UNKNOWN) - ) - with info._unlock(): - info["dig"].sort(key=lambda x: (x["kind"], x["ident"])) - has_cards = len(cards) == 3 - has_hpi = "hpi" in curry_paths - if has_cards and has_hpi: # have all three - logger.info("Composing device<->head transformation from dig points") - hpi_u = np.array( - [d["r"] for d in 
info["dig"] if d["kind"] == FIFF.FIFFV_POINT_HPI], float - ) - hpi_c = np.ascontiguousarray(_first_hpi(curry_paths["hpi"])[: len(hpi_u), 1:4]) - unknown_curry_t = _quaternion_align("unknown", "ctf_meg", hpi_u, hpi_c, 1e-2) - angle = np.rad2deg( - _angle_between_quats( - np.zeros(3), rot_to_quat(unknown_curry_t["trans"][:3, :3]) - ) - ) - dist = 1000 * np.linalg.norm(unknown_curry_t["trans"][:3, 3]) - logger.info(f" Fit a {angle:0.1f}° rotation, {dist:0.1f} mm translation") - unknown_dev_t = combine_transforms( - unknown_curry_t, curry_dev_dev_t, "unknown", "meg" - ) - unknown_head_t = Transform( - "unknown", - "head", - get_ras_to_neuromag_trans( - *( - cards[key] - for key in ( - FIFF.FIFFV_POINT_NASION, - FIFF.FIFFV_POINT_LPA, - FIFF.FIFFV_POINT_RPA, - ) - ) - ), - ) - with info._unlock(): - info["dev_head_t"] = combine_transforms( - invert_transform(unknown_dev_t), unknown_head_t, "meg", "head" - ) - for d in info["dig"]: - d.update( - coord_frame=FIFF.FIFFV_COORD_HEAD, - r=apply_trans(unknown_head_t, d["r"]), - ) - else: - if has_cards: - no_msg += " (no .hpi file found)" - elif has_hpi: - no_msg += " (not all cardinal points found)" - else: - no_msg += " (neither cardinal points nor .hpi file found)" - logger.info(no_msg) - - -def _first_hpi(fname): - # Get the first HPI result - with open(fname) as fid: - for line in fid: - line = line.strip() - if any(x in line for x in ("FileVersion", "NumCoils")) or not line: - continue - hpi = np.array(line.split(), float) - break - else: - raise RuntimeError(f"Could not find valid HPI in {fname}") - # t is the first entry - assert hpi.ndim == 1 - hpi = hpi[1:] - hpi.shape = (-1, 5) - hpi /= 1000.0 - return hpi +from .curryreader import read - -def _read_events_curry(fname): - """Read events from Curry event files. - - Parameters - ---------- - fname : path-like - Path to a curry event file with extensions .cef, .ceo, - .cdt.cef, or .cdt.ceo - - Returns - ------- - events : ndarray, shape (n_events, 3) - The array of events. - """ - check_fname( - fname, - "curry event", - (".cef", ".ceo", ".cdt.cef", ".cdt.ceo"), - endings_err=(".cef", ".ceo", ".cdt.cef", ".cdt.ceo"), - ) - - events_dict = _read_curry_lines(fname, ["NUMBER_LIST"]) - # The first 3 column seem to contain the event information - curry_events = np.array(events_dict["NUMBER_LIST"], dtype=int)[:, 0:3] - - return curry_events - - -def _read_annotations_curry(fname, sfreq="auto"): - r"""Read events from Curry event files. - - Parameters - ---------- - fname : str - The filename. - sfreq : float | 'auto' - The sampling frequency in the file. If set to 'auto' then the - ``sfreq`` is taken from the respective info file of the same name with - according file extension (\*.dap for Curry 7; \*.cdt.dpa for Curry8). - So data.cef looks in data.dap and data.cdt.cef looks in data.cdt.dpa. - - Returns - ------- - annot : instance of Annotations | None - The annotations. 
- """ - required = ["events", "info"] if sfreq == "auto" else ["events"] - curry_paths = _get_curry_file_structure(fname, required) - events = _read_events_curry(curry_paths["events"]) - - if sfreq == "auto": - sfreq = _read_curry_parameters(curry_paths["info"]).sfreq - - onset = events[:, 0] / sfreq - duration = np.zeros(events.shape[0]) - description = events[:, 2] - - return Annotations(onset, duration, description) +_RE_COMBINE_WHITESPACE = re.compile(r"\s+") @verbose @@ -582,50 +62,144 @@ class RawCurry(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): - curry_paths = _get_curry_file_structure( - fname, required=["info", "data", "labels"] + fname = Path(fname) + + # use curry-python-reader + try: + currydata = read(str(fname), plotdata=0, verbosity=1) + except Exception as e: + raise ValueError(f"file could not be read - {e}") + + # extract info + sfreq = currydata["info"]["samplingfreq"] + n_samples = currydata["info"]["samples"] + n_ch = currydata["info"]["channels"] + ch_names = currydata["labels"] + ch_pos = currydata["sensorpos"] + landmarks = currydata["landmarks"] + landmarkslabels = currydata["landmarkslabels"] + + # extract data + orig_format = ( + "single" + if isinstance(currydata["data"].dtype, type(np.dtype("float32"))) + else None ) + preload = currydata["data"].T.astype( + "float64" + ) # curryreader returns float32, but mne seems to need float64 + events = currydata["events"] + # annotations = currydata[ + # "annotations" + # ] # dont always seem to correspond to events?! + # epochinfo = currydata["epochinfo"] # TODO + # epochlabels = currydata["epochlabels"] # TODO + # impedances = currydata["impedances"] # TODO + # hpimatrix = currydata["hpimatrix"] # TODO + + # extract some more essential info not provided by reader + fname_hdr = None + for hdr_suff in [".cdt.dpa", ".cdt.dpo", ".dap"]: + if fname.with_suffix(hdr_suff).exists(): + fname_hdr = fname.with_suffix(hdr_suff) + + ch_types, units = [], [] + if fname_hdr: + changroups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] + for changroup_info in changroups: + changroup_info = _RE_COMBINE_WHITESPACE.sub(" ", changroup_info).strip() + groupid = changroup_info.split()[0] + unit = changroup_info.split("DataUnit = ")[1].split()[0] + n_ch_group = int( + changroup_info.split("NumChanThisGroup = ")[1].split()[0] + ) + ch_type = ( + "mag" + if ("MAG" in groupid) + else "misc" + if ("OTHER") in groupid + else "eeg" + ) + # combine info + ch_types += [ch_type] * n_ch_group + units += [unit] * n_ch_group - data_fname = op.abspath(curry_paths["data"]) - - info, n_samples, is_ascii = _read_curry_info(curry_paths) + assert len(ch_types) == len(units) == len(ch_names) == n_ch + else: + # not implemented + raise NotImplementedError + + # finetune channel types (e.g. 
stim, eog, etc. might be identified by name)
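+        # A hypothetical sketch of one approach (the name substrings below are
+        # assumptions, not validated against real Curry recordings):
+        #     for i, name in enumerate(ch_names):
+        #         upper = name.upper()
+        #         if "EOG" in upper:
+        #             ch_types[i] = "eog"
+        #         elif "ECG" in upper or "EKG" in upper:
+        #             ch_types[i] = "ecg"
+        #         elif "TRIG" in upper:
+        #             ch_types[i] = "stim"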
EEG, MEG) with dimensions (samples, + channels) + 'info' data information with keys: + {'samples', 'channels', 'trials', 'samplingfreq'} + 'labels' channel labels list + 'sensorpos' channel locations matrix [x,y,z] + 'events' events matrix where every row corresponds to: + [event latency, event type, event start, event stop] + 'annotations' events annotation list + 'epochinfo' epochs matrix where every row corresponds to: + [number of averages, total epochs, type, accept, correct, + response, response time] + 'epochlabels' epoch labels list + 'impedancematrix' impedance matrix with max size (channels, 10), corresponding to + last ten impedance measurements + 'landmarks' functional, HPI or headshape landmarks locations + 'landmarkslabels' labels for functional (e.g. LPA, Nasion,...), HPI (e.g. HPI 1, + HPI 2,...) or headshape (e.g. H1, H2,...) landmarks + 'hpimatrix' HPI-coil measurements matrix (Orion-MEG only) where every row is + [measurementsample, dipolefitflag, x, y, z, deviation] + + 2021 - Compumedics Neuroscan + """ + # configure verbosity logging + verbositylevel = ( + log.WARNING + if verbosity == 1 + else log.INFO + if verbosity == 2 + else log.DEBUG + if verbosity == 3 + else log.INFO + ) + + log.basicConfig(format="%(levelname)s: %(message)s", level=verbositylevel) + + if inputfilename == "": + try: + # create root window for filedialog + root = tk.Tk() + root.withdraw() + + # check if last used directory was kept + lastdirfilename = "lastdir.txt" + if os.path.isfile(lastdirfilename): + lastdirfile = open(lastdirfilename) + initdir = lastdirfile.read().strip() + lastdirfile.close() + else: + initdir = "/" + + filepath = filedialog.askopenfilename( + initialdir=initdir, + title="Open Curry Data File", + filetypes=( + ("All Curry files", "*.cdt *.dat"), + ("cdt files", "*.cdt"), + ("dat files", "*.dat"), + ("all files", "*.*"), + ), + ) + root.destroy() + + lastdirfile = open(lastdirfilename, "w") + lastdirfile.write(os.path.dirname(filepath)) + lastdirfile.close() + + # handle cancel + if not filepath: + raise Exception + except Exception as _: + raise Exception("Unable to open file") + else: + filepath = inputfilename + + # pathname = os.path.dirname(filepath) + filename = os.path.basename(filepath) + + try: + basename, extension = filepath.split(".", maxsplit=1) + except Exception as _: + raise Exception("Unsupported file, choose a cdt or dat file") + + parameterfile = "" + parameterfile2 = "" + labelfile = "" + # labelfile2 = "" + eventfile = "" + eventfile2 = "" + hpifile = "" + + if extension == "dat": + parameterfile = basename + ".dap" + labelfile = basename + ".rs3" + eventfile = basename + ".cef" + eventfile2 = basename + ".ceo" + elif extension == "cdt": + parameterfile = filepath + ".dpa" + parameterfile2 = filepath + ".dpo" + eventfile = filepath + ".cef" + eventfile2 = filepath + ".ceo" + hpifile = filepath + ".hpi" + else: + raise Exception("Unsupported extension, choose a cdt or dat file") + + log.info("Reading file %s ...", filename) + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # open parameter file + + contents = [] + + try: + fid = open(parameterfile) + contents = fid.read() + except Exception as _: + log.debug("Could not open parameter file, trying alternative extension...") + + # open alternative parameter file + if not contents: + try: + fid = open(parameterfile2) + contents = fid.read() + except FileNotFoundError: + raise FileNotFoundError("Parameter file not found") + except Exception as _: + raise Exception("Could 
not open alternative parameter file") + + fid.close() + + if not contents: + raise Exception("Parameter file is empty") + + # check for compressed file format + ctok = "DataGuid" + ix = contents.find(ctok) + ixstart = contents.find("=", ix) + 1 + ixstop = contents.find("\n", ix) + + if ix != -1: + text = contents[ixstart:ixstop].strip() + if text == "{2912E8D8-F5C8-4E25-A8E7-A1385967DA09}": + raise Exception( + "Unsupported compressed data format, use Curry to convert file to raw " + "float format" + ) + + # read parameters from parameter file + # tokens (second line is for Curry 6 notation) + tok = [ + "NumSamples", + "NumChannels", + "NumTrials", + "SampleFreqHz", + "TriggerOffsetUsec", + "DataFormat", + "DataSampOrder", + "SampleTimeUsec", + "NUM_SAMPLES", + "NUM_CHANNELS", + "NUM_TRIALS", + "SAMPLE_FREQ_HZ", + "TRIGGER_OFFSET_USEC", + "DATA_FORMAT", + "DATA_SAMP_ORDER", + "SAMPLE_TIME_USEC", + ] + + # scan keywords - all keywords must exist! + nt = len(tok) + a = [0] * nt # initialize + for i in range(nt): + ctok = tok[i] + ix = contents.find(ctok) + ixstart = contents.find("=", ix) + 1 # skip = + ixstop = contents.find("\n", ix) + if ix != -1: + text = contents[ixstart:ixstop].strip() + if text == "ASCII" or text == "CHAN": # test for alphanumeric values + a[i] = 1 + elif text.isnumeric(): + a[i] = float(text) # assign if it was a number + + # derived variables. numbers (1) (2) etc are the token numbers + nSamples = int(a[0] + a[int(0 + nt / 2)]) + nChannels = int(a[1] + a[int(1 + nt / 2)]) + nTrials = int(a[2] + a[int(2 + nt / 2)]) + fFrequency = a[3] + a[int(3 + nt / 2)] + fOffsetUsec = a[4] + a[int(4 + nt / 2)] + nASCII = int(a[5] + a[int(5 + nt / 2)]) + nMultiplex = int(a[6] + a[int(6 + nt / 2)]) + fSampleTime = a[7] + a[int(7 + nt / 2)] + + datainfo = { + "samples": nSamples, + "channels": nChannels, + "trials": nTrials, + "samplingfreq": fFrequency, + } + log.info( + "Number of samples = %s, number of channels = %s, number of trials/epochs = %s," + " sampling frequency = %s Hz", + str(nSamples), + str(nChannels), + str(nTrials), + str(fFrequency), + ) + + if fFrequency == 0 or fSampleTime != 0: + fFrequency = 1000000 / fSampleTime + + # try to guess number of samples based on datafile size + if nSamples < 0: + if nASCII == 1: + raise Exception( + "Number of samples cannot be guessed from ASCII data file. " + "Use Curry to convert this file to Raw Float format" + ) + else: + log.warning( + "Number of samples not present in parameter file. It will be estimated " + "from size of data file" + ) + fileSize = os.path.getsize(filepath) + nSamples = fileSize / (4 * nChannels * nTrials) + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for Impedance Values + tixstart = contents.find("IMPEDANCE_VALUES START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("IMPEDANCE_VALUES END_LIST") + + impedancelist = [] + + if tixstart != -1 and tixstop != 1: + text = contents[tixstart : tixstop - 1].split() + for imp in text: + if int(imp) != -1: # skip? 
+ impedancelist.append(float(imp)) + + # Curry records last 10 impedances + try: + impedancematrix = np.asarray(impedancelist, dtype=float).reshape( + int(len(impedancelist) / nChannels), nChannels + ) + except ValueError: + impedancematrix = np.empty((int(len(impedancelist) / nChannels), nChannels)) + + if impedancematrix.any(): + log.info("Found impedance matrix") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # open label file + if extension == "dat": + try: + fid = open(labelfile) + contents = fid.read() + fid.close() + except Exception as _: + log.warning("Found no label file") + + # read labels from label file + # initialize labels + labels = [""] * nChannels + + for i in range(nChannels): + labels[i] = "EEG" + str(i + 1) + + # scan for LABELS (occurs four times per channel group) + ix = findtokens("\nLABELS", contents) + nc = 0 + + if ix: + for i in range(3, len(ix), 4): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].split() + last = nChannels - nc + numLabels = min(last, len(text)) + for j in range(numLabels): # loop over labels + labels[nc] = text[j] + nc += 1 + log.info("Found channel labels") + else: + log.warning("Using dummy labels (EEG1, EEG2, ...)") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for landmarks + landmarks = [] + landmarkslabels = [] + + # scan for LANDMARKS (occurs four times per channel group) + ix = findtokens("\nLANDMARKS", contents) + nc = 0 + totallandmarks = 0 + numlandmarksgroup = [] # number of landmarks per group + + if ix: + for i in range( + 3, len(ix), 4 + ): # first pass over groups to find total of landmarks + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].splitlines()[1:] + totallandmarks += len(text) + numlandmarksgroup.append(len(text)) + + lmpositions = np.zeros([totallandmarks, 3]) + for i in range(3, len(ix), 4): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].split() + last = totallandmarks - nc + numlandmarks = min(last, int(len(text) / 3)) + for j in range(0, numlandmarks * 3, 3): + lmpositions[nc][:] = np.array(text[j : j + 3]) + nc += 1 + + landmarks = lmpositions + log.info("Found landmarks") + + # landmark labels + ix = findtokens("\nLM_REMARKS", contents) + landmarkslabels = [""] * totallandmarks + startindex = 0 + count = 0 + + if ix and totallandmarks: + for i in range(3, len(ix), 4): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].splitlines()[1:] + landmarkslabels[startindex : startindex + len(text)] = text + startindex += numlandmarksgroup[count] + count += 1 + + ########################################################################## + # read sensor locations from label file + sensorpos = [] + + # scan for SENSORS (occurs four times per channel group) + ix = findtokens("\nSENSORS", contents) + nc = 0 + + if ix: + grouppospersensor = [] + maxpersensor = 0 + numchanswithpos = 0 + for i in range( + 3, len(ix), 4 + ): # first pass over groups to determine sensorpos and maxpersensor sizes + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].splitlines()[1:] + numchanswithpos += len(text) + pospersensor = len(text[0].split()) + maxpersensor = max(pospersensor, maxpersensor) + grouppospersensor.append(pospersensor) + + if ( + (maxpersensor == 3 or maxpersensor == 6) + # 3 is (x,y,z) per sensor (EEG,MEG) + # 6 is (x,y,z,x1,y1,z1) per sensor (MEG) + and 
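+            # note: the -1 values skipped above appear to be placeholders for
+            # channels without an impedance measurement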
numchanswithpos > 0 + and numchanswithpos <= nChannels + ): + positions = np.zeros((numchanswithpos, maxpersensor)) + + for group, i in enumerate(range(3, len(ix), 4)): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].split() + last = nChannels - nc + pospersensor = grouppospersensor[group] + numchannels = min(last, int(len(text) / pospersensor)) + for j in range(0, numchannels * pospersensor, pospersensor): + positions[nc][:pospersensor] = np.array(text[j : j + pospersensor]) + nc += 1 + + sensorpos = positions + log.info("Found sensor positions") + else: + log.warning("Reading sensor positions failed (dimensions inconsistency)") + else: + log.warning("No sensor positions were found") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for epoch labels + epochlabelslist = [] + + if extension == "dat": + try: + fid = open(parameterfile) + contents = fid.read() + fid.close() + except Exception as _: + log.warning("Found no parameter file") + + ctok = "\nEPOCH_LABELS" + if ctok in contents: + tixstart = contents.find("EPOCH_LABELS START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("EPOCH_LABELS END_LIST") + + if tixstart != -1 and tixstop != 1: + epochlabelslist = contents[tixstart : tixstop - 1].split() + + if epochlabelslist: + log.info("Found epoch labels") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for epoch information + tixstart = contents.find("EPOCH_INFORMATION START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("EPOCH_INFORMATION END_LIST") + infoelements = 7 + epochinformation = [] + + if tixstart != -1 and tixstop != 1: + epochinformation = np.zeros((len(epochlabelslist), infoelements)) + text = contents[tixstart : tixstop - 1].split() + for i in range(0, len(text), infoelements): + for j in range(infoelements): + epochinformation[int(i / infoelements)][j] = int(text[i + j]) + + if epochinformation.any(): + log.info("Found epoch information") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # read events from event file + # initialize events + events = [] + annotations = [] + contents = [] + + try: + fid = open(eventfile) + contents = fid.read() + except Exception as _: + log.debug("Trying event file alternative extension...") + + # open alternative event file + if fid.closed: + try: + fid = open(eventfile2) + contents = fid.read() + except Exception as _: + log.debug("Found no event file") + + fid.close() + + if contents: + # scan for NUMBER_LIST (occurs five times) + tixstart = contents.find("NUMBER_LIST START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("NUMBER_LIST END_LIST") + numberelements = 11 + numbereventprops = 4 + + text = contents[tixstart : tixstop - 1].split() + events = np.zeros((0, numbereventprops)) + + for i in range(0, len(text), numberelements): + sample = int(text[i]) + etype = int(text[i + 2]) + startsample = int(text[i + 4]) + endsample = int(text[i + 5]) + newevent = np.array([sample, etype, startsample, endsample]) + events = np.vstack([events, newevent]) # concat new event in events matrix + + # scan for REMARK_LIST (occurs five times) + tixstart = contents.find("REMARK_LIST START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("REMARK_LIST END_LIST") + + if tixstart != -1 and tixstop != 1: + annotations = contents[tixstart : tixstop - 1].splitlines() + + 
log.info("Found events") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # read HPI coils (only Orion-MEG) if present + + hpimatrix = [] + contents = [] + + try: + fid = open(hpifile) + contents = fid.read() + except Exception as _: + log.debug("Found no HPI file") + + fid.close() + + if contents: + # get file version and number of coils + tixstart = contents.find("FileVersion") + tixstop = contents.find("\n", tixstart) + text = contents[tixstart:tixstop].split() + # hpifileversion = text[1] + + tixstart = contents.find("NumCoils") + tixstop = contents.find("\n", tixstart) + text = contents[tixstart:tixstop].split() + # numberofcoils = text[1] + + hpimatrix = np.loadtxt(hpifile, dtype=np.float32, skiprows=3) + log.info("Found HPI matrix") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # % read data file + + data = [] + + try: + itemstoread = nSamples * nTrials * nChannels + if nASCII == 1: + data = np.fromfile( + filepath, dtype=np.float32, count=itemstoread, sep=" " + ).reshape(nSamples * nTrials, nChannels) + else: + data = np.fromfile( + filepath, + dtype=np.float32, + count=itemstoread, + ).reshape(nSamples * nTrials, nChannels) + except FileNotFoundError: + raise FileNotFoundError("Data file not found") + except Exception as _: + raise Exception("Could not open data file") + + if nSamples * nTrials != data.shape[0]: + log.warning( + "Inconsistent number of samples. File may be displayed incompletely" + ) + nSamples = data.shape[0] / nTrials + + # transpose? + if nMultiplex == 1: + data = data.transpose() + + if plotdata > 0 and data.any(): + time = np.linspace( + fOffsetUsec / 1000, + fOffsetUsec / 1000 + (nSamples * nTrials - 1) * 1000 / fFrequency, + nSamples * nTrials, + dtype=np.float32, + ) + # avoid logging output from matplotlib + log.getLogger("matplotlib.font_manager").disabled = True + # stacked plot + amprange = max(abs(data.min()), abs(data.max())) + shift = np.linspace( + (nChannels - 1) * amprange * 0.3, 0, nChannels, dtype=np.float32 + ) + data += np.tile(shift, (nSamples * nTrials, 1)) + fig, ax = plt.subplots() + ax.plot(time, data) + ax.set_yticks(shift) + ax.set_yticklabels(labels) + ax.set_xlabel("Time [ms]") + ax.set_title(filename) + log.info("Found data file") + if plotdata == 1: + plt.show() + elif plotdata > 1: + plt.show(block=False) + plt.pause(plotdata) + plt.close() + else: + log.warning("Invalid plotdata input: please see description in help") + + # assemble output dict + output = { + "data": data, + "info": datainfo, + "labels": labels, + "sensorpos": sensorpos, + "events": events, + "annotations": annotations, + "epochinfo": epochinformation, + "epochlabels": epochlabelslist, + "impedances": impedancematrix, + "landmarks": landmarks, + "landmarkslabels": landmarkslabels, + "hpimatrix": hpimatrix, + } + + return output + + +def findtokens(token, contents): + """Findtoken. + + Returns indices of token occurrences in input string contents. 
+ """ + if not token or not contents: + raise Exception("Invalid input for finding token") + + tokenindices = [] + index = 0 + while index < len(contents): + index = contents.find(token, index) + if index == -1: + break + tokenindices.append(index) + index += len(token) + return tokenindices From e640023cfb0453dec4201e3c17300f56aedaa58b Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 20:55:22 +0000 Subject: [PATCH 02/27] [autofix.ci] apply automated fixes --- mne/io/curry/curryreader.py | 1262 ++++++++++++++++++----------------- 1 file changed, 633 insertions(+), 629 deletions(-) diff --git a/mne/io/curry/curryreader.py b/mne/io/curry/curryreader.py index c13039a34b9..0d893bdfc8e 100644 --- a/mne/io/curry/curryreader.py +++ b/mne/io/curry/curryreader.py @@ -1,629 +1,633 @@ -import logging as log -import os -import tkinter as tk -from tkinter import filedialog - -import matplotlib.pyplot as plt -import numpy as np - - -def read(inputfilename="", plotdata=1, verbosity=2): - """Curry Reader Help. - - Usage: - currydata = read(inputfilename = '', plotdata = 1, verbosity = 2) - - Inputs: - inputfilename: if left empty, reader will prompt user with file selection box, - otherwise specify filename with path; - supported files are: raw float (cdt), ascii (cdt), legacy raw - float (dat) and legacy ascii (dat) - plotdata: plotdata = 0, don't show plot - plotdata = 1, show plot (default) - plotdata = x, with x > 1, shows and automatically closes plot - after x seconds - verbosity: 1 is low, 2 is medium (default) and 3 is high - - Output as dictionary with keys: - 'data' functional data matrix (e.g. EEG, MEG) with dimensions (samples, - channels) - 'info' data information with keys: - {'samples', 'channels', 'trials', 'samplingfreq'} - 'labels' channel labels list - 'sensorpos' channel locations matrix [x,y,z] - 'events' events matrix where every row corresponds to: - [event latency, event type, event start, event stop] - 'annotations' events annotation list - 'epochinfo' epochs matrix where every row corresponds to: - [number of averages, total epochs, type, accept, correct, - response, response time] - 'epochlabels' epoch labels list - 'impedancematrix' impedance matrix with max size (channels, 10), corresponding to - last ten impedance measurements - 'landmarks' functional, HPI or headshape landmarks locations - 'landmarkslabels' labels for functional (e.g. LPA, Nasion,...), HPI (e.g. HPI 1, - HPI 2,...) or headshape (e.g. H1, H2,...) 
landmarks - 'hpimatrix' HPI-coil measurements matrix (Orion-MEG only) where every row is - [measurementsample, dipolefitflag, x, y, z, deviation] - - 2021 - Compumedics Neuroscan - """ - # configure verbosity logging - verbositylevel = ( - log.WARNING - if verbosity == 1 - else log.INFO - if verbosity == 2 - else log.DEBUG - if verbosity == 3 - else log.INFO - ) - - log.basicConfig(format="%(levelname)s: %(message)s", level=verbositylevel) - - if inputfilename == "": - try: - # create root window for filedialog - root = tk.Tk() - root.withdraw() - - # check if last used directory was kept - lastdirfilename = "lastdir.txt" - if os.path.isfile(lastdirfilename): - lastdirfile = open(lastdirfilename) - initdir = lastdirfile.read().strip() - lastdirfile.close() - else: - initdir = "/" - - filepath = filedialog.askopenfilename( - initialdir=initdir, - title="Open Curry Data File", - filetypes=( - ("All Curry files", "*.cdt *.dat"), - ("cdt files", "*.cdt"), - ("dat files", "*.dat"), - ("all files", "*.*"), - ), - ) - root.destroy() - - lastdirfile = open(lastdirfilename, "w") - lastdirfile.write(os.path.dirname(filepath)) - lastdirfile.close() - - # handle cancel - if not filepath: - raise Exception - except Exception as _: - raise Exception("Unable to open file") - else: - filepath = inputfilename - - # pathname = os.path.dirname(filepath) - filename = os.path.basename(filepath) - - try: - basename, extension = filepath.split(".", maxsplit=1) - except Exception as _: - raise Exception("Unsupported file, choose a cdt or dat file") - - parameterfile = "" - parameterfile2 = "" - labelfile = "" - # labelfile2 = "" - eventfile = "" - eventfile2 = "" - hpifile = "" - - if extension == "dat": - parameterfile = basename + ".dap" - labelfile = basename + ".rs3" - eventfile = basename + ".cef" - eventfile2 = basename + ".ceo" - elif extension == "cdt": - parameterfile = filepath + ".dpa" - parameterfile2 = filepath + ".dpo" - eventfile = filepath + ".cef" - eventfile2 = filepath + ".ceo" - hpifile = filepath + ".hpi" - else: - raise Exception("Unsupported extension, choose a cdt or dat file") - - log.info("Reading file %s ...", filename) - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # open parameter file - - contents = [] - - try: - fid = open(parameterfile) - contents = fid.read() - except Exception as _: - log.debug("Could not open parameter file, trying alternative extension...") - - # open alternative parameter file - if not contents: - try: - fid = open(parameterfile2) - contents = fid.read() - except FileNotFoundError: - raise FileNotFoundError("Parameter file not found") - except Exception as _: - raise Exception("Could not open alternative parameter file") - - fid.close() - - if not contents: - raise Exception("Parameter file is empty") - - # check for compressed file format - ctok = "DataGuid" - ix = contents.find(ctok) - ixstart = contents.find("=", ix) + 1 - ixstop = contents.find("\n", ix) - - if ix != -1: - text = contents[ixstart:ixstop].strip() - if text == "{2912E8D8-F5C8-4E25-A8E7-A1385967DA09}": - raise Exception( - "Unsupported compressed data format, use Curry to convert file to raw " - "float format" - ) - - # read parameters from parameter file - # tokens (second line is for Curry 6 notation) - tok = [ - "NumSamples", - "NumChannels", - "NumTrials", - "SampleFreqHz", - "TriggerOffsetUsec", - "DataFormat", - "DataSampOrder", - "SampleTimeUsec", - "NUM_SAMPLES", - "NUM_CHANNELS", - "NUM_TRIALS", - "SAMPLE_FREQ_HZ", - "TRIGGER_OFFSET_USEC", - 
"DATA_FORMAT", - "DATA_SAMP_ORDER", - "SAMPLE_TIME_USEC", - ] - - # scan keywords - all keywords must exist! - nt = len(tok) - a = [0] * nt # initialize - for i in range(nt): - ctok = tok[i] - ix = contents.find(ctok) - ixstart = contents.find("=", ix) + 1 # skip = - ixstop = contents.find("\n", ix) - if ix != -1: - text = contents[ixstart:ixstop].strip() - if text == "ASCII" or text == "CHAN": # test for alphanumeric values - a[i] = 1 - elif text.isnumeric(): - a[i] = float(text) # assign if it was a number - - # derived variables. numbers (1) (2) etc are the token numbers - nSamples = int(a[0] + a[int(0 + nt / 2)]) - nChannels = int(a[1] + a[int(1 + nt / 2)]) - nTrials = int(a[2] + a[int(2 + nt / 2)]) - fFrequency = a[3] + a[int(3 + nt / 2)] - fOffsetUsec = a[4] + a[int(4 + nt / 2)] - nASCII = int(a[5] + a[int(5 + nt / 2)]) - nMultiplex = int(a[6] + a[int(6 + nt / 2)]) - fSampleTime = a[7] + a[int(7 + nt / 2)] - - datainfo = { - "samples": nSamples, - "channels": nChannels, - "trials": nTrials, - "samplingfreq": fFrequency, - } - log.info( - "Number of samples = %s, number of channels = %s, number of trials/epochs = %s," - " sampling frequency = %s Hz", - str(nSamples), - str(nChannels), - str(nTrials), - str(fFrequency), - ) - - if fFrequency == 0 or fSampleTime != 0: - fFrequency = 1000000 / fSampleTime - - # try to guess number of samples based on datafile size - if nSamples < 0: - if nASCII == 1: - raise Exception( - "Number of samples cannot be guessed from ASCII data file. " - "Use Curry to convert this file to Raw Float format" - ) - else: - log.warning( - "Number of samples not present in parameter file. It will be estimated " - "from size of data file" - ) - fileSize = os.path.getsize(filepath) - nSamples = fileSize / (4 * nChannels * nTrials) - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for Impedance Values - tixstart = contents.find("IMPEDANCE_VALUES START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("IMPEDANCE_VALUES END_LIST") - - impedancelist = [] - - if tixstart != -1 and tixstop != 1: - text = contents[tixstart : tixstop - 1].split() - for imp in text: - if int(imp) != -1: # skip? 
- impedancelist.append(float(imp)) - - # Curry records last 10 impedances - try: - impedancematrix = np.asarray(impedancelist, dtype=float).reshape( - int(len(impedancelist) / nChannels), nChannels - ) - except ValueError: - impedancematrix = np.empty((int(len(impedancelist) / nChannels), nChannels)) - - if impedancematrix.any(): - log.info("Found impedance matrix") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # open label file - if extension == "dat": - try: - fid = open(labelfile) - contents = fid.read() - fid.close() - except Exception as _: - log.warning("Found no label file") - - # read labels from label file - # initialize labels - labels = [""] * nChannels - - for i in range(nChannels): - labels[i] = "EEG" + str(i + 1) - - # scan for LABELS (occurs four times per channel group) - ix = findtokens("\nLABELS", contents) - nc = 0 - - if ix: - for i in range(3, len(ix), 4): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].split() - last = nChannels - nc - numLabels = min(last, len(text)) - for j in range(numLabels): # loop over labels - labels[nc] = text[j] - nc += 1 - log.info("Found channel labels") - else: - log.warning("Using dummy labels (EEG1, EEG2, ...)") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for landmarks - landmarks = [] - landmarkslabels = [] - - # scan for LANDMARKS (occurs four times per channel group) - ix = findtokens("\nLANDMARKS", contents) - nc = 0 - totallandmarks = 0 - numlandmarksgroup = [] # number of landmarks per group - - if ix: - for i in range( - 3, len(ix), 4 - ): # first pass over groups to find total of landmarks - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].splitlines()[1:] - totallandmarks += len(text) - numlandmarksgroup.append(len(text)) - - lmpositions = np.zeros([totallandmarks, 3]) - for i in range(3, len(ix), 4): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].split() - last = totallandmarks - nc - numlandmarks = min(last, int(len(text) / 3)) - for j in range(0, numlandmarks * 3, 3): - lmpositions[nc][:] = np.array(text[j : j + 3]) - nc += 1 - - landmarks = lmpositions - log.info("Found landmarks") - - # landmark labels - ix = findtokens("\nLM_REMARKS", contents) - landmarkslabels = [""] * totallandmarks - startindex = 0 - count = 0 - - if ix and totallandmarks: - for i in range(3, len(ix), 4): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].splitlines()[1:] - landmarkslabels[startindex : startindex + len(text)] = text - startindex += numlandmarksgroup[count] - count += 1 - - ########################################################################## - # read sensor locations from label file - sensorpos = [] - - # scan for SENSORS (occurs four times per channel group) - ix = findtokens("\nSENSORS", contents) - nc = 0 - - if ix: - grouppospersensor = [] - maxpersensor = 0 - numchanswithpos = 0 - for i in range( - 3, len(ix), 4 - ): # first pass over groups to determine sensorpos and maxpersensor sizes - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].splitlines()[1:] - numchanswithpos += len(text) - pospersensor = len(text[0].split()) - maxpersensor = max(pospersensor, maxpersensor) - grouppospersensor.append(pospersensor) - - if ( - (maxpersensor == 3 or maxpersensor == 6) - # 3 is (x,y,z) per sensor (EEG,MEG) - # 6 is (x,y,z,x1,y1,z1) per sensor (MEG) - and 
numchanswithpos > 0 - and numchanswithpos <= nChannels - ): - positions = np.zeros((numchanswithpos, maxpersensor)) - - for group, i in enumerate(range(3, len(ix), 4)): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].split() - last = nChannels - nc - pospersensor = grouppospersensor[group] - numchannels = min(last, int(len(text) / pospersensor)) - for j in range(0, numchannels * pospersensor, pospersensor): - positions[nc][:pospersensor] = np.array(text[j : j + pospersensor]) - nc += 1 - - sensorpos = positions - log.info("Found sensor positions") - else: - log.warning("Reading sensor positions failed (dimensions inconsistency)") - else: - log.warning("No sensor positions were found") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for epoch labels - epochlabelslist = [] - - if extension == "dat": - try: - fid = open(parameterfile) - contents = fid.read() - fid.close() - except Exception as _: - log.warning("Found no parameter file") - - ctok = "\nEPOCH_LABELS" - if ctok in contents: - tixstart = contents.find("EPOCH_LABELS START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("EPOCH_LABELS END_LIST") - - if tixstart != -1 and tixstop != 1: - epochlabelslist = contents[tixstart : tixstop - 1].split() - - if epochlabelslist: - log.info("Found epoch labels") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for epoch information - tixstart = contents.find("EPOCH_INFORMATION START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("EPOCH_INFORMATION END_LIST") - infoelements = 7 - epochinformation = [] - - if tixstart != -1 and tixstop != 1: - epochinformation = np.zeros((len(epochlabelslist), infoelements)) - text = contents[tixstart : tixstop - 1].split() - for i in range(0, len(text), infoelements): - for j in range(infoelements): - epochinformation[int(i / infoelements)][j] = int(text[i + j]) - - if epochinformation.any(): - log.info("Found epoch information") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # read events from event file - # initialize events - events = [] - annotations = [] - contents = [] - - try: - fid = open(eventfile) - contents = fid.read() - except Exception as _: - log.debug("Trying event file alternative extension...") - - # open alternative event file - if fid.closed: - try: - fid = open(eventfile2) - contents = fid.read() - except Exception as _: - log.debug("Found no event file") - - fid.close() - - if contents: - # scan for NUMBER_LIST (occurs five times) - tixstart = contents.find("NUMBER_LIST START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("NUMBER_LIST END_LIST") - numberelements = 11 - numbereventprops = 4 - - text = contents[tixstart : tixstop - 1].split() - events = np.zeros((0, numbereventprops)) - - for i in range(0, len(text), numberelements): - sample = int(text[i]) - etype = int(text[i + 2]) - startsample = int(text[i + 4]) - endsample = int(text[i + 5]) - newevent = np.array([sample, etype, startsample, endsample]) - events = np.vstack([events, newevent]) # concat new event in events matrix - - # scan for REMARK_LIST (occurs five times) - tixstart = contents.find("REMARK_LIST START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("REMARK_LIST END_LIST") - - if tixstart != -1 and tixstop != 1: - annotations = contents[tixstart : tixstop - 1].splitlines() - - 
log.info("Found events") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # read HPI coils (only Orion-MEG) if present - - hpimatrix = [] - contents = [] - - try: - fid = open(hpifile) - contents = fid.read() - except Exception as _: - log.debug("Found no HPI file") - - fid.close() - - if contents: - # get file version and number of coils - tixstart = contents.find("FileVersion") - tixstop = contents.find("\n", tixstart) - text = contents[tixstart:tixstop].split() - # hpifileversion = text[1] - - tixstart = contents.find("NumCoils") - tixstop = contents.find("\n", tixstart) - text = contents[tixstart:tixstop].split() - # numberofcoils = text[1] - - hpimatrix = np.loadtxt(hpifile, dtype=np.float32, skiprows=3) - log.info("Found HPI matrix") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # % read data file - - data = [] - - try: - itemstoread = nSamples * nTrials * nChannels - if nASCII == 1: - data = np.fromfile( - filepath, dtype=np.float32, count=itemstoread, sep=" " - ).reshape(nSamples * nTrials, nChannels) - else: - data = np.fromfile( - filepath, - dtype=np.float32, - count=itemstoread, - ).reshape(nSamples * nTrials, nChannels) - except FileNotFoundError: - raise FileNotFoundError("Data file not found") - except Exception as _: - raise Exception("Could not open data file") - - if nSamples * nTrials != data.shape[0]: - log.warning( - "Inconsistent number of samples. File may be displayed incompletely" - ) - nSamples = data.shape[0] / nTrials - - # transpose? - if nMultiplex == 1: - data = data.transpose() - - if plotdata > 0 and data.any(): - time = np.linspace( - fOffsetUsec / 1000, - fOffsetUsec / 1000 + (nSamples * nTrials - 1) * 1000 / fFrequency, - nSamples * nTrials, - dtype=np.float32, - ) - # avoid logging output from matplotlib - log.getLogger("matplotlib.font_manager").disabled = True - # stacked plot - amprange = max(abs(data.min()), abs(data.max())) - shift = np.linspace( - (nChannels - 1) * amprange * 0.3, 0, nChannels, dtype=np.float32 - ) - data += np.tile(shift, (nSamples * nTrials, 1)) - fig, ax = plt.subplots() - ax.plot(time, data) - ax.set_yticks(shift) - ax.set_yticklabels(labels) - ax.set_xlabel("Time [ms]") - ax.set_title(filename) - log.info("Found data file") - if plotdata == 1: - plt.show() - elif plotdata > 1: - plt.show(block=False) - plt.pause(plotdata) - plt.close() - else: - log.warning("Invalid plotdata input: please see description in help") - - # assemble output dict - output = { - "data": data, - "info": datainfo, - "labels": labels, - "sensorpos": sensorpos, - "events": events, - "annotations": annotations, - "epochinfo": epochinformation, - "epochlabels": epochlabelslist, - "impedances": impedancematrix, - "landmarks": landmarks, - "landmarkslabels": landmarkslabels, - "hpimatrix": hpimatrix, - } - - return output - - -def findtokens(token, contents): - """Findtoken. - - Returns indices of token occurrences in input string contents. - """ - if not token or not contents: - raise Exception("Invalid input for finding token") - - tokenindices = [] - index = 0 - while index < len(contents): - index = contents.find(token, index) - if index == -1: - break - tokenindices.append(index) - index += len(token) - return tokenindices +# Authors: The MNE-Python contributors. +# License: BSD-3-Clause +# Copyright the MNE-Python contributors. 
+ +import logging as log +import os +import tkinter as tk +from tkinter import filedialog + +import matplotlib.pyplot as plt +import numpy as np + + +def read(inputfilename="", plotdata=1, verbosity=2): + """Curry Reader Help. + + Usage: + currydata = read(inputfilename = '', plotdata = 1, verbosity = 2) + + Inputs: + inputfilename: if left empty, reader will prompt user with file selection box, + otherwise specify filename with path; + supported files are: raw float (cdt), ascii (cdt), legacy raw + float (dat) and legacy ascii (dat) + plotdata: plotdata = 0, don't show plot + plotdata = 1, show plot (default) + plotdata = x, with x > 1, shows and automatically closes plot + after x seconds + verbosity: 1 is low, 2 is medium (default) and 3 is high + + Output as dictionary with keys: + 'data' functional data matrix (e.g. EEG, MEG) with dimensions (samples, + channels) + 'info' data information with keys: + {'samples', 'channels', 'trials', 'samplingfreq'} + 'labels' channel labels list + 'sensorpos' channel locations matrix [x,y,z] + 'events' events matrix where every row corresponds to: + [event latency, event type, event start, event stop] + 'annotations' events annotation list + 'epochinfo' epochs matrix where every row corresponds to: + [number of averages, total epochs, type, accept, correct, + response, response time] + 'epochlabels' epoch labels list + 'impedancematrix' impedance matrix with max size (channels, 10), corresponding to + last ten impedance measurements + 'landmarks' functional, HPI or headshape landmarks locations + 'landmarkslabels' labels for functional (e.g. LPA, Nasion,...), HPI (e.g. HPI 1, + HPI 2,...) or headshape (e.g. H1, H2,...) landmarks + 'hpimatrix' HPI-coil measurements matrix (Orion-MEG only) where every row is + [measurementsample, dipolefitflag, x, y, z, deviation] + + 2021 - Compumedics Neuroscan + """ + # configure verbosity logging + verbositylevel = ( + log.WARNING + if verbosity == 1 + else log.INFO + if verbosity == 2 + else log.DEBUG + if verbosity == 3 + else log.INFO + ) + + log.basicConfig(format="%(levelname)s: %(message)s", level=verbositylevel) + + if inputfilename == "": + try: + # create root window for filedialog + root = tk.Tk() + root.withdraw() + + # check if last used directory was kept + lastdirfilename = "lastdir.txt" + if os.path.isfile(lastdirfilename): + lastdirfile = open(lastdirfilename) + initdir = lastdirfile.read().strip() + lastdirfile.close() + else: + initdir = "/" + + filepath = filedialog.askopenfilename( + initialdir=initdir, + title="Open Curry Data File", + filetypes=( + ("All Curry files", "*.cdt *.dat"), + ("cdt files", "*.cdt"), + ("dat files", "*.dat"), + ("all files", "*.*"), + ), + ) + root.destroy() + + lastdirfile = open(lastdirfilename, "w") + lastdirfile.write(os.path.dirname(filepath)) + lastdirfile.close() + + # handle cancel + if not filepath: + raise Exception + except Exception as _: + raise Exception("Unable to open file") + else: + filepath = inputfilename + + # pathname = os.path.dirname(filepath) + filename = os.path.basename(filepath) + + try: + basename, extension = filepath.split(".", maxsplit=1) + except Exception as _: + raise Exception("Unsupported file, choose a cdt or dat file") + + parameterfile = "" + parameterfile2 = "" + labelfile = "" + # labelfile2 = "" + eventfile = "" + eventfile2 = "" + hpifile = "" + + if extension == "dat": + parameterfile = basename + ".dap" + labelfile = basename + ".rs3" + eventfile = basename + ".cef" + eventfile2 = basename + ".ceo" + elif extension == 
"cdt": + parameterfile = filepath + ".dpa" + parameterfile2 = filepath + ".dpo" + eventfile = filepath + ".cef" + eventfile2 = filepath + ".ceo" + hpifile = filepath + ".hpi" + else: + raise Exception("Unsupported extension, choose a cdt or dat file") + + log.info("Reading file %s ...", filename) + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # open parameter file + + contents = [] + + try: + fid = open(parameterfile) + contents = fid.read() + except Exception as _: + log.debug("Could not open parameter file, trying alternative extension...") + + # open alternative parameter file + if not contents: + try: + fid = open(parameterfile2) + contents = fid.read() + except FileNotFoundError: + raise FileNotFoundError("Parameter file not found") + except Exception as _: + raise Exception("Could not open alternative parameter file") + + fid.close() + + if not contents: + raise Exception("Parameter file is empty") + + # check for compressed file format + ctok = "DataGuid" + ix = contents.find(ctok) + ixstart = contents.find("=", ix) + 1 + ixstop = contents.find("\n", ix) + + if ix != -1: + text = contents[ixstart:ixstop].strip() + if text == "{2912E8D8-F5C8-4E25-A8E7-A1385967DA09}": + raise Exception( + "Unsupported compressed data format, use Curry to convert file to raw " + "float format" + ) + + # read parameters from parameter file + # tokens (second line is for Curry 6 notation) + tok = [ + "NumSamples", + "NumChannels", + "NumTrials", + "SampleFreqHz", + "TriggerOffsetUsec", + "DataFormat", + "DataSampOrder", + "SampleTimeUsec", + "NUM_SAMPLES", + "NUM_CHANNELS", + "NUM_TRIALS", + "SAMPLE_FREQ_HZ", + "TRIGGER_OFFSET_USEC", + "DATA_FORMAT", + "DATA_SAMP_ORDER", + "SAMPLE_TIME_USEC", + ] + + # scan keywords - all keywords must exist! + nt = len(tok) + a = [0] * nt # initialize + for i in range(nt): + ctok = tok[i] + ix = contents.find(ctok) + ixstart = contents.find("=", ix) + 1 # skip = + ixstop = contents.find("\n", ix) + if ix != -1: + text = contents[ixstart:ixstop].strip() + if text == "ASCII" or text == "CHAN": # test for alphanumeric values + a[i] = 1 + elif text.isnumeric(): + a[i] = float(text) # assign if it was a number + + # derived variables. numbers (1) (2) etc are the token numbers + nSamples = int(a[0] + a[int(0 + nt / 2)]) + nChannels = int(a[1] + a[int(1 + nt / 2)]) + nTrials = int(a[2] + a[int(2 + nt / 2)]) + fFrequency = a[3] + a[int(3 + nt / 2)] + fOffsetUsec = a[4] + a[int(4 + nt / 2)] + nASCII = int(a[5] + a[int(5 + nt / 2)]) + nMultiplex = int(a[6] + a[int(6 + nt / 2)]) + fSampleTime = a[7] + a[int(7 + nt / 2)] + + datainfo = { + "samples": nSamples, + "channels": nChannels, + "trials": nTrials, + "samplingfreq": fFrequency, + } + log.info( + "Number of samples = %s, number of channels = %s, number of trials/epochs = %s," + " sampling frequency = %s Hz", + str(nSamples), + str(nChannels), + str(nTrials), + str(fFrequency), + ) + + if fFrequency == 0 or fSampleTime != 0: + fFrequency = 1000000 / fSampleTime + + # try to guess number of samples based on datafile size + if nSamples < 0: + if nASCII == 1: + raise Exception( + "Number of samples cannot be guessed from ASCII data file. " + "Use Curry to convert this file to Raw Float format" + ) + else: + log.warning( + "Number of samples not present in parameter file. 
It will be estimated " + "from size of data file" + ) + fileSize = os.path.getsize(filepath) + nSamples = fileSize / (4 * nChannels * nTrials) + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for Impedance Values + tixstart = contents.find("IMPEDANCE_VALUES START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("IMPEDANCE_VALUES END_LIST") + + impedancelist = [] + + if tixstart != -1 and tixstop != 1: + text = contents[tixstart : tixstop - 1].split() + for imp in text: + if int(imp) != -1: # skip? + impedancelist.append(float(imp)) + + # Curry records last 10 impedances + try: + impedancematrix = np.asarray(impedancelist, dtype=float).reshape( + int(len(impedancelist) / nChannels), nChannels + ) + except ValueError: + impedancematrix = np.empty((int(len(impedancelist) / nChannels), nChannels)) + + if impedancematrix.any(): + log.info("Found impedance matrix") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # open label file + if extension == "dat": + try: + fid = open(labelfile) + contents = fid.read() + fid.close() + except Exception as _: + log.warning("Found no label file") + + # read labels from label file + # initialize labels + labels = [""] * nChannels + + for i in range(nChannels): + labels[i] = "EEG" + str(i + 1) + + # scan for LABELS (occurs four times per channel group) + ix = findtokens("\nLABELS", contents) + nc = 0 + + if ix: + for i in range(3, len(ix), 4): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].split() + last = nChannels - nc + numLabels = min(last, len(text)) + for j in range(numLabels): # loop over labels + labels[nc] = text[j] + nc += 1 + log.info("Found channel labels") + else: + log.warning("Using dummy labels (EEG1, EEG2, ...)") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for landmarks + landmarks = [] + landmarkslabels = [] + + # scan for LANDMARKS (occurs four times per channel group) + ix = findtokens("\nLANDMARKS", contents) + nc = 0 + totallandmarks = 0 + numlandmarksgroup = [] # number of landmarks per group + + if ix: + for i in range( + 3, len(ix), 4 + ): # first pass over groups to find total of landmarks + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].splitlines()[1:] + totallandmarks += len(text) + numlandmarksgroup.append(len(text)) + + lmpositions = np.zeros([totallandmarks, 3]) + for i in range(3, len(ix), 4): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].split() + last = totallandmarks - nc + numlandmarks = min(last, int(len(text) / 3)) + for j in range(0, numlandmarks * 3, 3): + lmpositions[nc][:] = np.array(text[j : j + 3]) + nc += 1 + + landmarks = lmpositions + log.info("Found landmarks") + + # landmark labels + ix = findtokens("\nLM_REMARKS", contents) + landmarkslabels = [""] * totallandmarks + startindex = 0 + count = 0 + + if ix and totallandmarks: + for i in range(3, len(ix), 4): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].splitlines()[1:] + landmarkslabels[startindex : startindex + len(text)] = text + startindex += numlandmarksgroup[count] + count += 1 + + ########################################################################## + # read sensor locations from label file + sensorpos = [] + + # scan for SENSORS (occurs four times per channel group) + ix = findtokens("\nSENSORS", contents) + nc = 0 + + 
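The parameter-file lists above are all delimited by `<TOKEN> START_LIST` / `<TOKEN> END_LIST` pairs. A minimal sketch of that window extraction; note that `str.find` returns -1 on failure, so the `tixstop != 1` guards in these blocks presumably intend `tixstop != -1`:

    def read_list_block(contents, token):
        """Return the whitespace-split items between START_LIST and END_LIST."""
        start = contents.find(token + " START_LIST")
        stop = contents.find(token + " END_LIST")
        if start == -1 or stop == -1:  # token missing from file
            return []
        start = contents.find("\n", start)  # skip the rest of the START_LIST line
        return contents[start:stop].split()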
if ix: + grouppospersensor = [] + maxpersensor = 0 + numchanswithpos = 0 + for i in range( + 3, len(ix), 4 + ): # first pass over groups to determine sensorpos and maxpersensor sizes + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].splitlines()[1:] + numchanswithpos += len(text) + pospersensor = len(text[0].split()) + maxpersensor = max(pospersensor, maxpersensor) + grouppospersensor.append(pospersensor) + + if ( + (maxpersensor == 3 or maxpersensor == 6) + # 3 is (x,y,z) per sensor (EEG,MEG) + # 6 is (x,y,z,x1,y1,z1) per sensor (MEG) + and numchanswithpos > 0 + and numchanswithpos <= nChannels + ): + positions = np.zeros((numchanswithpos, maxpersensor)) + + for group, i in enumerate(range(3, len(ix), 4)): # loop over channel groups + text = contents[ix[i - 1] : ix[i]] + text = text[text.find("\n", 1) :].split() + last = nChannels - nc + pospersensor = grouppospersensor[group] + numchannels = min(last, int(len(text) / pospersensor)) + for j in range(0, numchannels * pospersensor, pospersensor): + positions[nc][:pospersensor] = np.array(text[j : j + pospersensor]) + nc += 1 + + sensorpos = positions + log.info("Found sensor positions") + else: + log.warning("Reading sensor positions failed (dimensions inconsistency)") + else: + log.warning("No sensor positions were found") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for epoch labels + epochlabelslist = [] + + if extension == "dat": + try: + fid = open(parameterfile) + contents = fid.read() + fid.close() + except Exception as _: + log.warning("Found no parameter file") + + ctok = "\nEPOCH_LABELS" + if ctok in contents: + tixstart = contents.find("EPOCH_LABELS START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("EPOCH_LABELS END_LIST") + + if tixstart != -1 and tixstop != 1: + epochlabelslist = contents[tixstart : tixstop - 1].split() + + if epochlabelslist: + log.info("Found epoch labels") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # search for epoch information + tixstart = contents.find("EPOCH_INFORMATION START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("EPOCH_INFORMATION END_LIST") + infoelements = 7 + epochinformation = [] + + if tixstart != -1 and tixstop != 1: + epochinformation = np.zeros((len(epochlabelslist), infoelements)) + text = contents[tixstart : tixstop - 1].split() + for i in range(0, len(text), infoelements): + for j in range(infoelements): + epochinformation[int(i / infoelements)][j] = int(text[i + j]) + + if epochinformation.any(): + log.info("Found epoch information") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # read events from event file + # initialize events + events = [] + annotations = [] + contents = [] + + try: + fid = open(eventfile) + contents = fid.read() + except Exception as _: + log.debug("Trying event file alternative extension...") + + # open alternative event file + if fid.closed: + try: + fid = open(eventfile2) + contents = fid.read() + except Exception as _: + log.debug("Found no event file") + + fid.close() + + if contents: + # scan for NUMBER_LIST (occurs five times) + tixstart = contents.find("NUMBER_LIST START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("NUMBER_LIST END_LIST") + numberelements = 11 + numbereventprops = 4 + + text = contents[tixstart : tixstop - 1].split() + events = np.zeros((0, numbereventprops)) + + for i in range(0, len(text), 
numberelements): + sample = int(text[i]) + etype = int(text[i + 2]) + startsample = int(text[i + 4]) + endsample = int(text[i + 5]) + newevent = np.array([sample, etype, startsample, endsample]) + events = np.vstack([events, newevent]) # concat new event in events matrix + + # scan for REMARK_LIST (occurs five times) + tixstart = contents.find("REMARK_LIST START_LIST") + tixstart = contents.find("\n", tixstart) + tixstop = contents.find("REMARK_LIST END_LIST") + + if tixstart != -1 and tixstop != 1: + annotations = contents[tixstart : tixstop - 1].splitlines() + + log.info("Found events") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # read HPI coils (only Orion-MEG) if present + + hpimatrix = [] + contents = [] + + try: + fid = open(hpifile) + contents = fid.read() + except Exception as _: + log.debug("Found no HPI file") + + fid.close() + + if contents: + # get file version and number of coils + tixstart = contents.find("FileVersion") + tixstop = contents.find("\n", tixstart) + text = contents[tixstart:tixstop].split() + # hpifileversion = text[1] + + tixstart = contents.find("NumCoils") + tixstop = contents.find("\n", tixstart) + text = contents[tixstart:tixstop].split() + # numberofcoils = text[1] + + hpimatrix = np.loadtxt(hpifile, dtype=np.float32, skiprows=3) + log.info("Found HPI matrix") + + # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + # % read data file + + data = [] + + try: + itemstoread = nSamples * nTrials * nChannels + if nASCII == 1: + data = np.fromfile( + filepath, dtype=np.float32, count=itemstoread, sep=" " + ).reshape(nSamples * nTrials, nChannels) + else: + data = np.fromfile( + filepath, + dtype=np.float32, + count=itemstoread, + ).reshape(nSamples * nTrials, nChannels) + except FileNotFoundError: + raise FileNotFoundError("Data file not found") + except Exception as _: + raise Exception("Could not open data file") + + if nSamples * nTrials != data.shape[0]: + log.warning( + "Inconsistent number of samples. File may be displayed incompletely" + ) + nSamples = data.shape[0] / nTrials + + # transpose? + if nMultiplex == 1: + data = data.transpose() + + if plotdata > 0 and data.any(): + time = np.linspace( + fOffsetUsec / 1000, + fOffsetUsec / 1000 + (nSamples * nTrials - 1) * 1000 / fFrequency, + nSamples * nTrials, + dtype=np.float32, + ) + # avoid logging output from matplotlib + log.getLogger("matplotlib.font_manager").disabled = True + # stacked plot + amprange = max(abs(data.min()), abs(data.max())) + shift = np.linspace( + (nChannels - 1) * amprange * 0.3, 0, nChannels, dtype=np.float32 + ) + data += np.tile(shift, (nSamples * nTrials, 1)) + fig, ax = plt.subplots() + ax.plot(time, data) + ax.set_yticks(shift) + ax.set_yticklabels(labels) + ax.set_xlabel("Time [ms]") + ax.set_title(filename) + log.info("Found data file") + if plotdata == 1: + plt.show() + elif plotdata > 1: + plt.show(block=False) + plt.pause(plotdata) + plt.close() + else: + log.warning("Invalid plotdata input: please see description in help") + + # assemble output dict + output = { + "data": data, + "info": datainfo, + "labels": labels, + "sensorpos": sensorpos, + "events": events, + "annotations": annotations, + "epochinfo": epochinformation, + "epochlabels": epochlabelslist, + "impedances": impedancematrix, + "landmarks": landmarks, + "landmarkslabels": landmarkslabels, + "hpimatrix": hpimatrix, + } + + return output + + +def findtokens(token, contents): + """Findtoken. 
+ + Returns indices of token occurrences in input string contents. + """ + if not token or not contents: + raise Exception("Invalid input for finding token") + + tokenindices = [] + index = 0 + while index < len(contents): + index = contents.find(token, index) + if index == -1: + break + tokenindices.append(index) + index += len(token) + return tokenindices From d533a5f687492412b6735b825a2964280036d7cf Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 2 Apr 2025 23:25:50 +0100 Subject: [PATCH 03/27] remove local curryreader module + impedance reader + minor fixes --- mne/io/curry/curry.py | 98 ++++-- mne/io/curry/curryreader.py | 633 ------------------------------------ 2 files changed, 72 insertions(+), 659 deletions(-) delete mode 100644 mne/io/curry/curryreader.py diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index dda983e5a51..0bc7db068cf 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -6,6 +6,7 @@ import re from pathlib import Path +import curryreader import numpy as np from ..._fiff.meas_info import create_info @@ -13,9 +14,6 @@ from ...channels import make_dig_montage from ...utils import verbose from ..base import BaseRaw -from .curryreader import read - -_RE_COMBINE_WHITESPACE = re.compile(r"\s+") @verbose @@ -61,12 +59,12 @@ class RawCurry(BaseRaw): """ @verbose - def __init__(self, fname, preload=False, verbose=None): + def __init__(self, fname, preload=True, verbose=None): fname = Path(fname) # use curry-python-reader try: - currydata = read(str(fname), plotdata=0, verbosity=1) + currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) except Exception as e: raise ValueError(f"file could not be read - {e}") @@ -80,11 +78,8 @@ def __init__(self, fname, preload=False, verbose=None): landmarkslabels = currydata["landmarkslabels"] # extract data - orig_format = ( - "single" - if isinstance(currydata["data"].dtype, type(np.dtype("float32"))) - else None - ) + orig_format = "single" # curryreader.py always reads float32. is this correct? + preload = currydata["data"].T.astype( "float64" ) # curryreader returns float32, but mne seems to need float64 @@ -92,12 +87,12 @@ def __init__(self, fname, preload=False, verbose=None): # annotations = currydata[ # "annotations" # ] # dont always seem to correspond to events?! 
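For reference, the wrapping that this new `__init__` performs can be reproduced standalone; a minimal sketch assuming an EEG-only recording (real channel types and units come from the header file, as below):

    import curryreader
    import mne

    currydata = curryreader.read("subject01.cdt", plotdata=0, verbosity=1)
    info = mne.create_info(
        ch_names=currydata["labels"],
        sfreq=currydata["info"]["samplingfreq"],
        ch_types="eeg",  # assumption; not all channels need be EEG
    )
    # curryreader returns (samples, channels) float32; MNE wants (channels, samples)
    raw = mne.io.RawArray(currydata["data"].T.astype("float64"), info)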
+ # impedances = currydata["impedances"] # see read_impedances_curry # epochinfo = currydata["epochinfo"] # TODO # epochlabels = currydata["epochlabels"] # TODO - # impedances = currydata["impedances"] # TODO # hpimatrix = currydata["hpimatrix"] # TODO - # extract some more essential info not provided by reader + # extract other essential info not provided by curryreader fname_hdr = None for hdr_suff in [".cdt.dpa", ".cdt.dpo", ".dap"]: if fname.with_suffix(hdr_suff).exists(): @@ -105,19 +100,17 @@ def __init__(self, fname, preload=False, verbose=None): ch_types, units = [], [] if fname_hdr: - changroups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] - for changroup_info in changroups: - changroup_info = _RE_COMBINE_WHITESPACE.sub(" ", changroup_info).strip() - groupid = changroup_info.split()[0] - unit = changroup_info.split("DataUnit = ")[1].split()[0] - n_ch_group = int( - changroup_info.split("NumChanThisGroup = ")[1].split()[0] - ) + ch_groups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] + for ch_group in ch_groups: + ch_group = re.compile(r"\s+").sub(" ", ch_group).strip() + groupid = ch_group.split()[0] + unit = ch_group.split("DataUnit = ")[1].split()[0] + n_ch_group = int(ch_group.split("NumChanThisGroup = ")[1].split()[0]) ch_type = ( "mag" if ("MAG" in groupid) else "misc" - if ("OTHER") in groupid + if ("OTHER" in groupid) else "eeg" ) # combine info @@ -127,11 +120,10 @@ def __init__(self, fname, preload=False, verbose=None): assert len(ch_types) == len(units) == len(ch_names) == n_ch else: - # not implemented raise NotImplementedError # finetune channel types (e.g. stim, eog etc might be identified by name) - # TODO + # TODO? # scale data to SI units orig_units = dict(zip(ch_names, units)) @@ -172,6 +164,9 @@ def __init__(self, fname, preload=False, verbose=None): mont = _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels) self.set_montage(mont, on_missing="ignore") + # add HPI data (if present) + # TODO + def _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels): ch_pos_dict = dict(zip(ch_names, ch_pos)) @@ -180,10 +175,17 @@ def _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels): if k not in landmark_dict.keys(): landmark_dict[k] = None if len(landmarkslabels) > 0: - hpi_pos = landmarks[[i for i, n in enumerate(landmarkslabels) if "HPI" in n], :] + hpi_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("HPI[1-99]", n)], : + ] else: hpi_pos = None - # TODO headshape (H1,2,3..) + if len(landmarkslabels) > 0: + hsp_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("H[1-99]", n)], : + ] + else: + hsp_pos = None mont = None if ch_pos.shape[1] == 3: # eeg xyz space @@ -192,7 +194,7 @@ def _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels): nasion=landmark_dict["Nas"], lpa=landmark_dict["LPA"], rpa=landmark_dict["RPA"], - hsp=None, + hsp=hsp_pos, hpi=hpi_pos, coord_frame="unknown", ) @@ -203,3 +205,47 @@ def _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels): pass return mont + + +def read_impedances_curry(fname): + """Read impedance measurements from Curry files. + + Parameters + ---------- + fname : path-like + Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, + ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + + Returns + ------- + ch_names : list + A list object containing channel names + impedances : np.ndarray + An array containing up to 10 impedance measurements for all recorded channels. 
+ + """ + # use curry-python-reader to load data + try: + currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) + except Exception as e: + raise ValueError(f"file could not be read - {e}") + + impedances = currydata["impedances"] + ch_names = currydata["labels"] + + # try get measurement times + # TODO possible? + annotations = currydata[ + "annotations" + ] # dont really seem to correspond to events!?! + for anno in set(annotations): + if "impedance" in anno.lower(): + print("FOUND IMPEDANCE ANNOTATION!") + print(f"'{anno}' - N={len([a for a in annotations if a == anno])}") + + # print impedances + print("impedance measurements:") + for iimp in range(impedances.shape[0]): + print({ch: float(imp) for ch, imp in zip(ch_names, impedances[iimp])}) + + return ch_names, impedances diff --git a/mne/io/curry/curryreader.py b/mne/io/curry/curryreader.py deleted file mode 100644 index 0d893bdfc8e..00000000000 --- a/mne/io/curry/curryreader.py +++ /dev/null @@ -1,633 +0,0 @@ -# Authors: The MNE-Python contributors. -# License: BSD-3-Clause -# Copyright the MNE-Python contributors. - -import logging as log -import os -import tkinter as tk -from tkinter import filedialog - -import matplotlib.pyplot as plt -import numpy as np - - -def read(inputfilename="", plotdata=1, verbosity=2): - """Curry Reader Help. - - Usage: - currydata = read(inputfilename = '', plotdata = 1, verbosity = 2) - - Inputs: - inputfilename: if left empty, reader will prompt user with file selection box, - otherwise specify filename with path; - supported files are: raw float (cdt), ascii (cdt), legacy raw - float (dat) and legacy ascii (dat) - plotdata: plotdata = 0, don't show plot - plotdata = 1, show plot (default) - plotdata = x, with x > 1, shows and automatically closes plot - after x seconds - verbosity: 1 is low, 2 is medium (default) and 3 is high - - Output as dictionary with keys: - 'data' functional data matrix (e.g. EEG, MEG) with dimensions (samples, - channels) - 'info' data information with keys: - {'samples', 'channels', 'trials', 'samplingfreq'} - 'labels' channel labels list - 'sensorpos' channel locations matrix [x,y,z] - 'events' events matrix where every row corresponds to: - [event latency, event type, event start, event stop] - 'annotations' events annotation list - 'epochinfo' epochs matrix where every row corresponds to: - [number of averages, total epochs, type, accept, correct, - response, response time] - 'epochlabels' epoch labels list - 'impedancematrix' impedance matrix with max size (channels, 10), corresponding to - last ten impedance measurements - 'landmarks' functional, HPI or headshape landmarks locations - 'landmarkslabels' labels for functional (e.g. LPA, Nasion,...), HPI (e.g. HPI 1, - HPI 2,...) or headshape (e.g. H1, H2,...) 
landmarks - 'hpimatrix' HPI-coil measurements matrix (Orion-MEG only) where every row is - [measurementsample, dipolefitflag, x, y, z, deviation] - - 2021 - Compumedics Neuroscan - """ - # configure verbosity logging - verbositylevel = ( - log.WARNING - if verbosity == 1 - else log.INFO - if verbosity == 2 - else log.DEBUG - if verbosity == 3 - else log.INFO - ) - - log.basicConfig(format="%(levelname)s: %(message)s", level=verbositylevel) - - if inputfilename == "": - try: - # create root window for filedialog - root = tk.Tk() - root.withdraw() - - # check if last used directory was kept - lastdirfilename = "lastdir.txt" - if os.path.isfile(lastdirfilename): - lastdirfile = open(lastdirfilename) - initdir = lastdirfile.read().strip() - lastdirfile.close() - else: - initdir = "/" - - filepath = filedialog.askopenfilename( - initialdir=initdir, - title="Open Curry Data File", - filetypes=( - ("All Curry files", "*.cdt *.dat"), - ("cdt files", "*.cdt"), - ("dat files", "*.dat"), - ("all files", "*.*"), - ), - ) - root.destroy() - - lastdirfile = open(lastdirfilename, "w") - lastdirfile.write(os.path.dirname(filepath)) - lastdirfile.close() - - # handle cancel - if not filepath: - raise Exception - except Exception as _: - raise Exception("Unable to open file") - else: - filepath = inputfilename - - # pathname = os.path.dirname(filepath) - filename = os.path.basename(filepath) - - try: - basename, extension = filepath.split(".", maxsplit=1) - except Exception as _: - raise Exception("Unsupported file, choose a cdt or dat file") - - parameterfile = "" - parameterfile2 = "" - labelfile = "" - # labelfile2 = "" - eventfile = "" - eventfile2 = "" - hpifile = "" - - if extension == "dat": - parameterfile = basename + ".dap" - labelfile = basename + ".rs3" - eventfile = basename + ".cef" - eventfile2 = basename + ".ceo" - elif extension == "cdt": - parameterfile = filepath + ".dpa" - parameterfile2 = filepath + ".dpo" - eventfile = filepath + ".cef" - eventfile2 = filepath + ".ceo" - hpifile = filepath + ".hpi" - else: - raise Exception("Unsupported extension, choose a cdt or dat file") - - log.info("Reading file %s ...", filename) - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # open parameter file - - contents = [] - - try: - fid = open(parameterfile) - contents = fid.read() - except Exception as _: - log.debug("Could not open parameter file, trying alternative extension...") - - # open alternative parameter file - if not contents: - try: - fid = open(parameterfile2) - contents = fid.read() - except FileNotFoundError: - raise FileNotFoundError("Parameter file not found") - except Exception as _: - raise Exception("Could not open alternative parameter file") - - fid.close() - - if not contents: - raise Exception("Parameter file is empty") - - # check for compressed file format - ctok = "DataGuid" - ix = contents.find(ctok) - ixstart = contents.find("=", ix) + 1 - ixstop = contents.find("\n", ix) - - if ix != -1: - text = contents[ixstart:ixstop].strip() - if text == "{2912E8D8-F5C8-4E25-A8E7-A1385967DA09}": - raise Exception( - "Unsupported compressed data format, use Curry to convert file to raw " - "float format" - ) - - # read parameters from parameter file - # tokens (second line is for Curry 6 notation) - tok = [ - "NumSamples", - "NumChannels", - "NumTrials", - "SampleFreqHz", - "TriggerOffsetUsec", - "DataFormat", - "DataSampOrder", - "SampleTimeUsec", - "NUM_SAMPLES", - "NUM_CHANNELS", - "NUM_TRIALS", - "SAMPLE_FREQ_HZ", - "TRIGGER_OFFSET_USEC", - 
"DATA_FORMAT", - "DATA_SAMP_ORDER", - "SAMPLE_TIME_USEC", - ] - - # scan keywords - all keywords must exist! - nt = len(tok) - a = [0] * nt # initialize - for i in range(nt): - ctok = tok[i] - ix = contents.find(ctok) - ixstart = contents.find("=", ix) + 1 # skip = - ixstop = contents.find("\n", ix) - if ix != -1: - text = contents[ixstart:ixstop].strip() - if text == "ASCII" or text == "CHAN": # test for alphanumeric values - a[i] = 1 - elif text.isnumeric(): - a[i] = float(text) # assign if it was a number - - # derived variables. numbers (1) (2) etc are the token numbers - nSamples = int(a[0] + a[int(0 + nt / 2)]) - nChannels = int(a[1] + a[int(1 + nt / 2)]) - nTrials = int(a[2] + a[int(2 + nt / 2)]) - fFrequency = a[3] + a[int(3 + nt / 2)] - fOffsetUsec = a[4] + a[int(4 + nt / 2)] - nASCII = int(a[5] + a[int(5 + nt / 2)]) - nMultiplex = int(a[6] + a[int(6 + nt / 2)]) - fSampleTime = a[7] + a[int(7 + nt / 2)] - - datainfo = { - "samples": nSamples, - "channels": nChannels, - "trials": nTrials, - "samplingfreq": fFrequency, - } - log.info( - "Number of samples = %s, number of channels = %s, number of trials/epochs = %s," - " sampling frequency = %s Hz", - str(nSamples), - str(nChannels), - str(nTrials), - str(fFrequency), - ) - - if fFrequency == 0 or fSampleTime != 0: - fFrequency = 1000000 / fSampleTime - - # try to guess number of samples based on datafile size - if nSamples < 0: - if nASCII == 1: - raise Exception( - "Number of samples cannot be guessed from ASCII data file. " - "Use Curry to convert this file to Raw Float format" - ) - else: - log.warning( - "Number of samples not present in parameter file. It will be estimated " - "from size of data file" - ) - fileSize = os.path.getsize(filepath) - nSamples = fileSize / (4 * nChannels * nTrials) - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for Impedance Values - tixstart = contents.find("IMPEDANCE_VALUES START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("IMPEDANCE_VALUES END_LIST") - - impedancelist = [] - - if tixstart != -1 and tixstop != 1: - text = contents[tixstart : tixstop - 1].split() - for imp in text: - if int(imp) != -1: # skip? 
- impedancelist.append(float(imp)) - - # Curry records last 10 impedances - try: - impedancematrix = np.asarray(impedancelist, dtype=float).reshape( - int(len(impedancelist) / nChannels), nChannels - ) - except ValueError: - impedancematrix = np.empty((int(len(impedancelist) / nChannels), nChannels)) - - if impedancematrix.any(): - log.info("Found impedance matrix") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # open label file - if extension == "dat": - try: - fid = open(labelfile) - contents = fid.read() - fid.close() - except Exception as _: - log.warning("Found no label file") - - # read labels from label file - # initialize labels - labels = [""] * nChannels - - for i in range(nChannels): - labels[i] = "EEG" + str(i + 1) - - # scan for LABELS (occurs four times per channel group) - ix = findtokens("\nLABELS", contents) - nc = 0 - - if ix: - for i in range(3, len(ix), 4): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].split() - last = nChannels - nc - numLabels = min(last, len(text)) - for j in range(numLabels): # loop over labels - labels[nc] = text[j] - nc += 1 - log.info("Found channel labels") - else: - log.warning("Using dummy labels (EEG1, EEG2, ...)") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for landmarks - landmarks = [] - landmarkslabels = [] - - # scan for LANDMARKS (occurs four times per channel group) - ix = findtokens("\nLANDMARKS", contents) - nc = 0 - totallandmarks = 0 - numlandmarksgroup = [] # number of landmarks per group - - if ix: - for i in range( - 3, len(ix), 4 - ): # first pass over groups to find total of landmarks - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].splitlines()[1:] - totallandmarks += len(text) - numlandmarksgroup.append(len(text)) - - lmpositions = np.zeros([totallandmarks, 3]) - for i in range(3, len(ix), 4): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].split() - last = totallandmarks - nc - numlandmarks = min(last, int(len(text) / 3)) - for j in range(0, numlandmarks * 3, 3): - lmpositions[nc][:] = np.array(text[j : j + 3]) - nc += 1 - - landmarks = lmpositions - log.info("Found landmarks") - - # landmark labels - ix = findtokens("\nLM_REMARKS", contents) - landmarkslabels = [""] * totallandmarks - startindex = 0 - count = 0 - - if ix and totallandmarks: - for i in range(3, len(ix), 4): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].splitlines()[1:] - landmarkslabels[startindex : startindex + len(text)] = text - startindex += numlandmarksgroup[count] - count += 1 - - ########################################################################## - # read sensor locations from label file - sensorpos = [] - - # scan for SENSORS (occurs four times per channel group) - ix = findtokens("\nSENSORS", contents) - nc = 0 - - if ix: - grouppospersensor = [] - maxpersensor = 0 - numchanswithpos = 0 - for i in range( - 3, len(ix), 4 - ): # first pass over groups to determine sensorpos and maxpersensor sizes - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].splitlines()[1:] - numchanswithpos += len(text) - pospersensor = len(text[0].split()) - maxpersensor = max(pospersensor, maxpersensor) - grouppospersensor.append(pospersensor) - - if ( - (maxpersensor == 3 or maxpersensor == 6) - # 3 is (x,y,z) per sensor (EEG,MEG) - # 6 is (x,y,z,x1,y1,z1) per sensor (MEG) - and 
numchanswithpos > 0 - and numchanswithpos <= nChannels - ): - positions = np.zeros((numchanswithpos, maxpersensor)) - - for group, i in enumerate(range(3, len(ix), 4)): # loop over channel groups - text = contents[ix[i - 1] : ix[i]] - text = text[text.find("\n", 1) :].split() - last = nChannels - nc - pospersensor = grouppospersensor[group] - numchannels = min(last, int(len(text) / pospersensor)) - for j in range(0, numchannels * pospersensor, pospersensor): - positions[nc][:pospersensor] = np.array(text[j : j + pospersensor]) - nc += 1 - - sensorpos = positions - log.info("Found sensor positions") - else: - log.warning("Reading sensor positions failed (dimensions inconsistency)") - else: - log.warning("No sensor positions were found") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for epoch labels - epochlabelslist = [] - - if extension == "dat": - try: - fid = open(parameterfile) - contents = fid.read() - fid.close() - except Exception as _: - log.warning("Found no parameter file") - - ctok = "\nEPOCH_LABELS" - if ctok in contents: - tixstart = contents.find("EPOCH_LABELS START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("EPOCH_LABELS END_LIST") - - if tixstart != -1 and tixstop != 1: - epochlabelslist = contents[tixstart : tixstop - 1].split() - - if epochlabelslist: - log.info("Found epoch labels") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # search for epoch information - tixstart = contents.find("EPOCH_INFORMATION START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("EPOCH_INFORMATION END_LIST") - infoelements = 7 - epochinformation = [] - - if tixstart != -1 and tixstop != 1: - epochinformation = np.zeros((len(epochlabelslist), infoelements)) - text = contents[tixstart : tixstop - 1].split() - for i in range(0, len(text), infoelements): - for j in range(infoelements): - epochinformation[int(i / infoelements)][j] = int(text[i + j]) - - if epochinformation.any(): - log.info("Found epoch information") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # read events from event file - # initialize events - events = [] - annotations = [] - contents = [] - - try: - fid = open(eventfile) - contents = fid.read() - except Exception as _: - log.debug("Trying event file alternative extension...") - - # open alternative event file - if fid.closed: - try: - fid = open(eventfile2) - contents = fid.read() - except Exception as _: - log.debug("Found no event file") - - fid.close() - - if contents: - # scan for NUMBER_LIST (occurs five times) - tixstart = contents.find("NUMBER_LIST START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("NUMBER_LIST END_LIST") - numberelements = 11 - numbereventprops = 4 - - text = contents[tixstart : tixstop - 1].split() - events = np.zeros((0, numbereventprops)) - - for i in range(0, len(text), numberelements): - sample = int(text[i]) - etype = int(text[i + 2]) - startsample = int(text[i + 4]) - endsample = int(text[i + 5]) - newevent = np.array([sample, etype, startsample, endsample]) - events = np.vstack([events, newevent]) # concat new event in events matrix - - # scan for REMARK_LIST (occurs five times) - tixstart = contents.find("REMARK_LIST START_LIST") - tixstart = contents.find("\n", tixstart) - tixstop = contents.find("REMARK_LIST END_LIST") - - if tixstart != -1 and tixstop != 1: - annotations = contents[tixstart : tixstop - 1].splitlines() - - 
log.info("Found events") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # read HPI coils (only Orion-MEG) if present - - hpimatrix = [] - contents = [] - - try: - fid = open(hpifile) - contents = fid.read() - except Exception as _: - log.debug("Found no HPI file") - - fid.close() - - if contents: - # get file version and number of coils - tixstart = contents.find("FileVersion") - tixstop = contents.find("\n", tixstart) - text = contents[tixstart:tixstop].split() - # hpifileversion = text[1] - - tixstart = contents.find("NumCoils") - tixstop = contents.find("\n", tixstart) - text = contents[tixstart:tixstop].split() - # numberofcoils = text[1] - - hpimatrix = np.loadtxt(hpifile, dtype=np.float32, skiprows=3) - log.info("Found HPI matrix") - - # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - # % read data file - - data = [] - - try: - itemstoread = nSamples * nTrials * nChannels - if nASCII == 1: - data = np.fromfile( - filepath, dtype=np.float32, count=itemstoread, sep=" " - ).reshape(nSamples * nTrials, nChannels) - else: - data = np.fromfile( - filepath, - dtype=np.float32, - count=itemstoread, - ).reshape(nSamples * nTrials, nChannels) - except FileNotFoundError: - raise FileNotFoundError("Data file not found") - except Exception as _: - raise Exception("Could not open data file") - - if nSamples * nTrials != data.shape[0]: - log.warning( - "Inconsistent number of samples. File may be displayed incompletely" - ) - nSamples = data.shape[0] / nTrials - - # transpose? - if nMultiplex == 1: - data = data.transpose() - - if plotdata > 0 and data.any(): - time = np.linspace( - fOffsetUsec / 1000, - fOffsetUsec / 1000 + (nSamples * nTrials - 1) * 1000 / fFrequency, - nSamples * nTrials, - dtype=np.float32, - ) - # avoid logging output from matplotlib - log.getLogger("matplotlib.font_manager").disabled = True - # stacked plot - amprange = max(abs(data.min()), abs(data.max())) - shift = np.linspace( - (nChannels - 1) * amprange * 0.3, 0, nChannels, dtype=np.float32 - ) - data += np.tile(shift, (nSamples * nTrials, 1)) - fig, ax = plt.subplots() - ax.plot(time, data) - ax.set_yticks(shift) - ax.set_yticklabels(labels) - ax.set_xlabel("Time [ms]") - ax.set_title(filename) - log.info("Found data file") - if plotdata == 1: - plt.show() - elif plotdata > 1: - plt.show(block=False) - plt.pause(plotdata) - plt.close() - else: - log.warning("Invalid plotdata input: please see description in help") - - # assemble output dict - output = { - "data": data, - "info": datainfo, - "labels": labels, - "sensorpos": sensorpos, - "events": events, - "annotations": annotations, - "epochinfo": epochinformation, - "epochlabels": epochlabelslist, - "impedances": impedancematrix, - "landmarks": landmarks, - "landmarkslabels": landmarkslabels, - "hpimatrix": hpimatrix, - } - - return output - - -def findtokens(token, contents): - """Findtoken. - - Returns indices of token occurrences in input string contents. 
- """ - if not token or not contents: - raise Exception("Invalid input for finding token") - - tokenindices = [] - index = 0 - while index < len(contents): - index = contents.find(token, index) - if index == -1: - break - tokenindices.append(index) - index += len(token) - return tokenindices From 63c4da3abebe13f27425b1d5e77b66e481bad85a Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Fri, 11 Apr 2025 11:21:04 +0100 Subject: [PATCH 04/27] add dependencies --- azure-pipelines.yml | 2 +- environment.yml | 1 + pyproject.toml | 1 + tools/install_pre_requirements.sh | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index da85c3c729b..c4149914775 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -113,7 +113,7 @@ stages: - bash: | set -e python -m pip install --progress-bar off --upgrade pip - python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git" pyvista scikit-learn python-picard qtpy nibabel sphinx-gallery "PySide6!=6.8.0,!=6.8.0.1,!=6.8.1.1" pandas neo pymatreader antio defusedxml + python -m pip install --progress-bar off "mne-qt-browser[opengl] @ git+https://github.com/mne-tools/mne-qt-browser.git" pyvista scikit-learn python-picard qtpy nibabel sphinx-gallery "PySide6!=6.8.0,!=6.8.0.1,!=6.8.1.1" pandas neo pymatreader antio defusedxml curryreader python -m pip uninstall -yq mne python -m pip install --progress-bar off --upgrade -e .[test] displayName: 'Install dependencies with pip' diff --git a/environment.yml b/environment.yml index 78c773e56bf..74e525dd1f7 100644 --- a/environment.yml +++ b/environment.yml @@ -5,6 +5,7 @@ channels: dependencies: - python >=3.10 - antio >=0.5.0 + - curryreader >=0.0.1 - darkdetect - decorator - defusedxml diff --git a/pyproject.toml b/pyproject.toml index 59e5a1703e3..25c2ea213b9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -88,6 +88,7 @@ full = ["mne[full-no-qt]", "PyQt6 != 6.6.0", "PyQt6-Qt6 != 6.6.0, != 6.7.0"] # and mne[full-pyside6], which will install PySide6 instead of PyQt6. 
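With curryreader promoted to a required extra across the CI and packaging files here, code importing it can still fail gracefully when the package is absent; a generic sketch (not necessarily the pattern MNE itself uses):

    try:
        import curryreader
    except ImportError as err:
        raise ImportError(
            "Reading Curry files requires the 'curryreader' package; "
            "install it, e.g., via 'pip install curryreader'."
        ) from err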
full-no-qt = [ "antio >= 0.5.0", + "curryreader >= 0.0.1", "darkdetect", "defusedxml", "dipy", diff --git a/tools/install_pre_requirements.sh b/tools/install_pre_requirements.sh index a87ef5d9a68..ad6bbb51d56 100755 --- a/tools/install_pre_requirements.sh +++ b/tools/install_pre_requirements.sh @@ -60,7 +60,7 @@ echo "pyvistaqt" pip install $STD_ARGS git+https://github.com/pyvista/pyvistaqt echo "imageio-ffmpeg, xlrd, mffpy" -pip install $STD_ARGS imageio-ffmpeg xlrd mffpy traitlets pybv eeglabio defusedxml antio +pip install $STD_ARGS imageio-ffmpeg xlrd mffpy traitlets pybv eeglabio defusedxml antio curryreader echo "mne-qt-browser" pip install $STD_ARGS git+https://github.com/mne-tools/mne-qt-browser From a8286d8c4fb8bb90be57c010e3c08b59bed124f6 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Fri, 11 Apr 2025 18:21:46 +0100 Subject: [PATCH 05/27] add preload option --- mne/io/curry/curry.py | 85 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 16 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 0bc7db068cf..ed2cc886f84 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -10,9 +10,10 @@ import numpy as np from ..._fiff.meas_info import create_info +from ..._fiff.utils import _mult_cal_one, _read_segments_file from ...annotations import annotations_from_events from ...channels import make_dig_montage -from ...utils import verbose +from ...utils import verbose, warn from ..base import BaseRaw @@ -59,7 +60,7 @@ class RawCurry(BaseRaw): """ @verbose - def __init__(self, fname, preload=True, verbose=None): + def __init__(self, fname, preload=False, verbose=None): fname = Path(fname) # use curry-python-reader @@ -70,7 +71,15 @@ def __init__(self, fname, preload=True, verbose=None): # extract info sfreq = currydata["info"]["samplingfreq"] - n_samples = currydata["info"]["samples"] + if currydata["info"]["samples"] == currydata["data"].shape[0]: + n_samples = currydata["info"]["samples"] + else: + n_samples = currydata["data"].shape[0] + warn( + "sample count from header doesn't match actual data! " + "file corrupted? will use data shape" + ) + n_ch = currydata["info"]["channels"] ch_names = currydata["labels"] ch_pos = currydata["sensorpos"] @@ -80,17 +89,20 @@ def __init__(self, fname, preload=True, verbose=None): # extract data orig_format = "single" # curryreader.py always reads float32. is this correct? - preload = currydata["data"].T.astype( - "float64" - ) # curryreader returns float32, but mne seems to need float64 + if isinstance(preload, bool | np.bool_) and preload: + preload = currydata["data"].T.astype( + "float64" + ) # curryreader returns float32, but mne seems to need float64 events = currydata["events"] # annotations = currydata[ # "annotations" # ] # dont always seem to correspond to events?! 
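The change above brings preload in line with the usual MNE convention: `preload=True` materializes the data immediately (reusing the array curryreader already holds), while `preload=False` defers reading. A usage sketch (path hypothetical):

    from mne.io import read_raw_curry

    raw = read_raw_curry("subject01.cdt", preload=False)  # metadata only
    raw.load_data()  # fetch the samples later, on demand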
# impedances = currydata["impedances"] # see read_impedances_curry # epochinfo = currydata["epochinfo"] # TODO - # epochlabels = currydata["epochlabels"] # TODO - # hpimatrix = currydata["hpimatrix"] # TODO + epochlabels = currydata["epochlabels"] # TODO + if epochlabels != []: + warn("epoched recording detected; WIP") + hpimatrix = currydata["hpimatrix"] # TODO # extract other essential info not provided by curryreader fname_hdr = None @@ -100,6 +112,7 @@ def __init__(self, fname, preload=True, verbose=None): ch_types, units = [], [] if fname_hdr: + # read channel types ch_groups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] for ch_group in ch_groups: ch_group = re.compile(r"\s+").sub(" ", ch_group).strip() @@ -116,9 +129,14 @@ def __init__(self, fname, preload=True, verbose=None): # combine info ch_types += [ch_type] * n_ch_group units += [unit] * n_ch_group - assert len(ch_types) == len(units) == len(ch_names) == n_ch + # read datatype + byteorder = ( + fname_hdr.read_text().split("DataByteOrder")[1].strip().split(" ")[1] + ) + is_ascii = "ASCII" in byteorder + else: raise NotImplementedError @@ -127,17 +145,23 @@ def __init__(self, fname, preload=True, verbose=None): # scale data to SI units orig_units = dict(zip(ch_names, units)) - for i_ch, unit in enumerate(units): - if unit == "fT": # femtoTesla - preload[i_ch, :] /= 1e15 - elif unit == "uV": # microVolt - preload[i_ch, :] /= 1e6 - else: # leave as is - pass + cals = [ + 1.0 / 1e15 if (u == "fT") else 1.0 / 1e6 if (u == "uV") else 1.0 + for u in units + ] + if isinstance(preload, np.ndarray): + for i_ch, unit in enumerate(units): + if unit == "fT": # femtoTesla + preload[i_ch, :] /= 1e15 + elif unit == "uV": # microVolt + preload[i_ch, :] /= 1e6 + else: # leave as is + pass # construct info info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) last_samps = [n_samples - 1] + raw_extras = dict(is_ascii=is_ascii) # create raw object super().__init__( @@ -146,9 +170,11 @@ def __init__(self, fname, preload=True, verbose=None): filenames=[fname], last_samps=last_samps, orig_format=orig_format, + raw_extras=[raw_extras], orig_units=orig_units, verbose=verbose, ) + self._cals = np.array(cals) # set events / annotations # format from curryreader: sample, etype, startsample, endsample @@ -166,6 +192,33 @@ def __init__(self, fname, preload=True, verbose=None): # add HPI data (if present) # TODO + if not isinstance(hpimatrix, list): + warn("HPI data found, but reader not implemented.") + + def _rescale_curry_data(self): + orig_units = self._orig_units + for i_ch, unit in enumerate(orig_units): + if unit == "fT": # femtoTesla + self._data[i_ch, :] /= 1e15 + elif unit == "µV": # microVolt + self._data[i_ch, :] /= 1e6 + else: # leave as is + pass + + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): + """Read a chunk of raw data.""" + if self._raw_extras[fi]["is_ascii"]: + if isinstance(idx, slice): + idx = np.arange(idx.start, idx.stop) + block = np.loadtxt( + self.filenames[0], skiprows=start, max_rows=stop - start, ndmin=2 + ).T + _mult_cal_one(data, block, idx, cals, mult) + + else: + _read_segments_file( + self, data, idx, fi, start, stop, cals, mult, dtype=" Date: Mon, 14 Apr 2025 12:26:59 +0200 Subject: [PATCH 06/27] [ci skip] handle supply of wrong file extensions --- mne/io/curry/curry.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index ed2cc886f84..6f11c552ffc 100644 --- a/mne/io/curry/curry.py +++ 
b/mne/io/curry/curry.py
@@ -16,6 +16,9 @@
 from ...utils import verbose, warn
 from ..base import BaseRaw
 
+CURRY_SUFFIX_DATA = [".cdt", ".dat"]
+CURRY_SUFFIX_HDR = [".cdt.dpa", ".cdt.dpo", ".dap"]
+
 
 @verbose
 def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry":
@@ -61,7 +64,17 @@ class RawCurry(BaseRaw):
 
     @verbose
     def __init__(self, fname, preload=False, verbose=None):
-        fname = Path(fname)
+        fname_in = Path(fname)
+        fname = None
+        if fname_in.suffix in CURRY_SUFFIX_DATA:
+            fname = fname_in
+        else:
+            for data_suff in CURRY_SUFFIX_DATA:
+                if fname_in.with_suffix(data_suff).exists():
+                    fname = fname_in.with_suffix(data_suff)
+                    break
+        if not fname:
+            raise FileNotFoundError("no curry data file found (.dat or .cdt)")
 
         # use curry-python-reader
         try:
@@ -106,7 +119,7 @@ def __init__(self, fname, preload=False, verbose=None):
 
         # extract other essential info not provided by curryreader
         fname_hdr = None
-        for hdr_suff in [".cdt.dpa", ".cdt.dpo", ".dap"]:
+        for hdr_suff in CURRY_SUFFIX_HDR:
             if fname.with_suffix(hdr_suff).exists():
                 fname_hdr = fname.with_suffix(hdr_suff)
 
From 4cd41d651d05fd86b5dcc2dd8d51baf8cb48fb0a Mon Sep 17 00:00:00 2001
From: dominikwelke
Date: Wed, 23 Apr 2025 13:46:49 +0200
Subject: [PATCH 07/27] refactoring

---
 mne/io/curry/curry.py | 495 +++++++++++++++++++++++++++++-------------
 1 file changed, 339 insertions(+), 156 deletions(-)

diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py
index 6f11c552ffc..0f8f8a4a23b 100644
--- a/mne/io/curry/curry.py
+++ b/mne/io/curry/curry.py
@@ -4,15 +4,19 @@
 # Copyright the MNE-Python contributors.
 
 import re
+from datetime import datetime, timezone
 from pathlib import Path
 
 import curryreader
 import numpy as np
+import pandas as pd
 
+# from ..._fiff._digitization import _make_dig_points
 from ..._fiff.meas_info import create_info
 from ..._fiff.utils import _mult_cal_one, _read_segments_file
 from ...annotations import annotations_from_events
 from ...channels import make_dig_montage
+from ...epochs import Epochs
 from ...utils import verbose, warn
 from ..base import BaseRaw
 
@@ -20,8 +24,280 @@
 CURRY_SUFFIX_HDR = [".cdt.dpa", ".cdt.dpo", ".dap"]
 
 
+def _check_curry_filename(fname):
+    fname_in = Path(fname)
+    fname_out = None
+    # try suffixes
+    if fname_in.suffix in CURRY_SUFFIX_DATA:
+        fname_out = fname_in
+    else:
+        for data_suff in CURRY_SUFFIX_DATA:
+            if fname_in.with_suffix(data_suff).exists():
+                fname_out = fname_in.with_suffix(data_suff)
+                break
+    # final check
+    if not fname_out or not fname_out.exists():
+        raise FileNotFoundError("no curry data file found (.dat or .cdt)")
+    return fname_out
+
+
+def _check_curry_header_filename(fname):
+    fname_hdr = None
+    # try suffixes
+    for hdr_suff in CURRY_SUFFIX_HDR:
+        if fname.with_suffix(hdr_suff).exists():
+            fname_hdr = fname.with_suffix(hdr_suff)
+            break
+    # final check
+    if not fname_hdr or not fname_hdr.exists():
+        raise FileNotFoundError(
+            f"no corresponding header file found {CURRY_SUFFIX_HDR}"
+        )
+    return fname_hdr
+
+
+def _get_curry_recording_type(fname):
+    epochinfo = curryreader.read(str(fname), plotdata=0, verbosity=1)["epochinfo"]
+    if epochinfo.size == 0:
+        return "raw"
+    else:
+        n_average = epochinfo[:, 0]
+        if (n_average == 1).all():
+            return "epochs"
+        else:
+            return "evoked"
+
+
+def _get_curry_epoch_info(fname):
+    # use curry-python-reader
+    currydata = curryreader.read(str(fname), plotdata=0, verbosity=1)
+
+    # get epoch info
+    sfreq = currydata["info"]["samplingfreq"]
+    n_samples = currydata["info"]["samples"]
+    n_epochs = len(currydata["epochlabels"])
+    epochinfo = 
currydata["epochinfo"] + epochtypes = epochinfo[:, 2].astype(int).tolist() + epochlabels = currydata["epochlabels"] + epochmetainfo = pd.DataFrame( + epochinfo[:, -4:], columns=["accept", "correct", "response", "response time"] + ) + # create mne events + events = np.array( + [[i * n_samples for i in range(n_epochs)], [0] * n_epochs, epochtypes] + ).T + event_id = dict(zip(epochlabels, epochtypes)) + return dict( + events=events, + event_id=event_id, + tmin=0.0, + tmax=(n_samples - 1) / sfreq, + baseline=(0, 0), + metadata=epochmetainfo, + reject_by_annotation=False, + ) + + +def _extract_curry_info(fname, preload): + # use curry-python-reader + currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) + + # basic info + sfreq = currydata["info"]["samplingfreq"] + n_samples = currydata["info"]["samples"] + if n_samples != currydata["data"].shape[0]: # normal in epoched data + n_samples = currydata["data"].shape[0] + if _get_curry_recording_type(fname) == "raw": + warn( + "sample count from header doesn't match actual data! " + "file corrupted? will use data shape" + ) + + # channel information + n_ch = currydata["info"]["channels"] + ch_names = currydata["labels"] + ch_pos = currydata["sensorpos"] + landmarks = currydata["landmarks"] + landmarkslabels = currydata["landmarkslabels"] + hpimatrix = currydata["hpimatrix"] + + # data + orig_format = "single" # curryreader.py always reads float32. is this correct? + if isinstance(preload, bool | np.bool_) and preload: + preload = currydata["data"].T.astype( + "float64" + ) # curryreader returns float32, but mne seems to need float64 + + # events + events = currydata["events"] + # annotations = currydata[ + # "annotations" + # ] # TODO these dont really seem to correspond to events! what is it? + + # impedance measurements + # moved to standalone def; see read_impedances_curry + # impedances = currydata["impedances"] + + # get other essential info not provided by curryreader + fname_hdr = _check_curry_header_filename(fname) + content_hdr = fname_hdr.read_text() + + # read meas_date + meas_date = [ + int(re.compile(rf"{v}\s*=\s*-?\d+").search(content_hdr).group(0).split()[-1]) + for v in [ + "StartYear", + "StartMonth", + "StartDay", + "StartHour", + "StartMin", + "StartSec", + "StartMillisec", + ] + ] + try: + meas_date = datetime( + *meas_date[:-1], + meas_date[-1] * 1000, # -> microseconds + timezone.utc, + ) + except Exception: + meas_date = None + + print(f"meas_date: {meas_date}") + + # read datatype + byteorder = ( + re.compile(r"DataByteOrder\s*=\s*[A-Z]+") + .search(content_hdr) + .group() + .split()[-1] + ) + is_ascii = byteorder == "ASCII" + + # amp info + # TODO + # amp_info = ( + # re.compile(r"AmplifierInfo\s*=.*\n").search(content_hdr).group().split("= ") + # ) + + # channel types and units + ch_types, units = [], [] + ch_groups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] + for ch_group in ch_groups: + ch_group = re.compile(r"\s+").sub(" ", ch_group).strip() + groupid = ch_group.split()[0] + unit = ch_group.split("DataUnit = ")[1].split()[0] + n_ch_group = int(ch_group.split("NumChanThisGroup = ")[1].split()[0]) + ch_type = ( + "mag" if ("MAG" in groupid) else "misc" if ("OTHER" in groupid) else "eeg" + ) + # combine info + ch_types += [ch_type] * n_ch_group + units += [unit] * n_ch_group + assert len(ch_types) == len(units) == len(ch_names) == n_ch + + # finetune channel types (e.g. stim, eog etc might be identified by name) + # TODO? 
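`_get_curry_epoch_info` above synthesizes one event per concatenated epoch, placed at the epoch's first sample; a worked example with hypothetical numbers:

    import numpy as np

    n_samples, n_epochs = 1000, 3  # samples per epoch, number of epochs
    epochtypes = [1, 1, 2]         # third column of epochinfo
    events = np.array(
        [[i * n_samples for i in range(n_epochs)], [0] * n_epochs, epochtypes]
    ).T
    # array([[   0, 0, 1],
    #        [1000, 0, 1],
    #        [2000, 0, 2]])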
+ + # scale data to SI units + orig_units = dict(zip(ch_names, units)) + cals = [ + 1.0 / 1e15 if (u == "fT") else 1.0 / 1e6 if (u == "uV") else 1.0 for u in units + ] + # if isinstance(preload, np.ndarray): + # for i_ch, unit in enumerate(units): + # if unit == "fT": # femtoTesla + # preload[i_ch, :] /= 1e15 + # elif unit == "uV": # microVolt + # preload[i_ch, :] /= 1e6 + # else: # leave as is + # pass + + return ( + sfreq, + n_samples, + ch_names, + ch_types, + ch_pos, + landmarks, + landmarkslabels, + hpimatrix, + events, + orig_format, + orig_units, + is_ascii, + cals, + preload, + meas_date, + ) + + +def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): + # scale ch_pos to m?! + ch_pos /= 1000.0 + # channel locations + # only take inner coil for MEG (ch_pos[i,:3]) + # TODO what about misc without pos? can they mess things up if unordered? + assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) + ch_pos_meg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "mag" + } + ch_pos_eeg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "eeg" + } + # landmarks and headshape + landmark_dict = dict(zip(landmarkslabels, landmarks)) + for k in ["Nas", "RPA", "LPA"]: + if k not in landmark_dict.keys(): + landmark_dict[k] = None + if len(landmarkslabels) > 0: + hpi_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("HPI[1-99]", n)], : + ] + else: + hpi_pos = None + if len(landmarkslabels) > 0: + hsp_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("H[1-99]", n)], : + ] + else: + hsp_pos = None + # make dig montage for eeg + mont = None + if ch_pos.shape[1] in [3, 6]: # eeg xyz space + mont = make_dig_montage( + ch_pos=ch_pos_eeg, + nasion=landmark_dict["Nas"], + lpa=landmark_dict["LPA"], + rpa=landmark_dict["RPA"], + hsp=hsp_pos, + hpi=hpi_pos, + coord_frame="unknown", + ) + # dig = _make_dig_points( + # nasion=landmark_dict["Nas"], + # lpa=landmark_dict["LPA"], + # rpa=landmark_dict["RPA"], + # hpi=hpi_pos, + # extra_points=hsp_pos, + # dig_ch_pos=ch_pos_eeg, + # coord_frame="unknown", + # ) + else: # not recorded? + pass + + # collect pos for meg + if ch_pos_meg != dict(): + warn("reading MEG sensor locations not yet implemented!") + + return mont + + @verbose -def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry": +def read_raw_curry( + fname, import_epochs_as_events=False, preload=False, verbose=None +) -> "RawCurry": """Read raw data from Curry files. Parameters @@ -29,6 +305,9 @@ def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry": fname : path-like Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + import_epochs_as_events : bool + Set to ``True`` if you want to import epoched recordings as continuous ``raw`` + object with event annotations. Only do this if you know your data allows this! %(preload)s %(verbose)s @@ -42,7 +321,26 @@ def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry": -------- mne.io.Raw : Documentation of attributes and methods of RawCurry. 
""" - return RawCurry(fname, preload, verbose) + fname = _check_curry_filename(fname) + rectype = _get_curry_recording_type(fname) + + inst = RawCurry(fname, preload, verbose) + if rectype in ["epochs", "evoked"]: + curry_epoch_info = _get_curry_epoch_info(fname) + if import_epochs_as_events: + epoch_annotations = annotations_from_events( + events=curry_epoch_info["events"], + event_desc={v: k for k, v in curry_epoch_info["event_id"].items()}, + sfreq=inst.info["sfreq"], + ) + inst.set_annotations(inst.annotations + epoch_annotations) + else: + inst = Epochs( + inst, **curry_epoch_info + ) # TODO seems to rejects flat channel + if rectype == "evoked": + raise NotImplementedError + return inst class RawCurry(BaseRaw): @@ -64,119 +362,32 @@ class RawCurry(BaseRaw): @verbose def __init__(self, fname, preload=False, verbose=None): - fname_in = Path(fname) - fname = None - if fname_in.suffix in CURRY_SUFFIX_DATA: - fname = fname_in - else: - for data_suff in CURRY_SUFFIX_DATA: - if fname_in.with_suffix(data_suff).exists(): - fname = fname_in.with_suffix(data_suff) - break - if not fname: - raise FileNotFoundError("no curry data file found (.dat or .cdt)") - - # use curry-python-reader - try: - currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) - except Exception as e: - raise ValueError(f"file could not be read - {e}") - - # extract info - sfreq = currydata["info"]["samplingfreq"] - if currydata["info"]["samples"] == currydata["data"].shape[0]: - n_samples = currydata["info"]["samples"] - else: - n_samples = currydata["data"].shape[0] - warn( - "sample count from header doesn't match actual data! " - "file corrupted? will use data shape" - ) - - n_ch = currydata["info"]["channels"] - ch_names = currydata["labels"] - ch_pos = currydata["sensorpos"] - landmarks = currydata["landmarks"] - landmarkslabels = currydata["landmarkslabels"] - - # extract data - orig_format = "single" # curryreader.py always reads float32. is this correct? - - if isinstance(preload, bool | np.bool_) and preload: - preload = currydata["data"].T.astype( - "float64" - ) # curryreader returns float32, but mne seems to need float64 - events = currydata["events"] - # annotations = currydata[ - # "annotations" - # ] # dont always seem to correspond to events?! 
- # impedances = currydata["impedances"] # see read_impedances_curry - # epochinfo = currydata["epochinfo"] # TODO - epochlabels = currydata["epochlabels"] # TODO - if epochlabels != []: - warn("epoched recording detected; WIP") - hpimatrix = currydata["hpimatrix"] # TODO - - # extract other essential info not provided by curryreader - fname_hdr = None - for hdr_suff in CURRY_SUFFIX_HDR: - if fname.with_suffix(hdr_suff).exists(): - fname_hdr = fname.with_suffix(hdr_suff) - - ch_types, units = [], [] - if fname_hdr: - # read channel types - ch_groups = fname_hdr.read_text().split("DEVICE_PARAMETERS")[1::2] - for ch_group in ch_groups: - ch_group = re.compile(r"\s+").sub(" ", ch_group).strip() - groupid = ch_group.split()[0] - unit = ch_group.split("DataUnit = ")[1].split()[0] - n_ch_group = int(ch_group.split("NumChanThisGroup = ")[1].split()[0]) - ch_type = ( - "mag" - if ("MAG" in groupid) - else "misc" - if ("OTHER" in groupid) - else "eeg" - ) - # combine info - ch_types += [ch_type] * n_ch_group - units += [unit] * n_ch_group - assert len(ch_types) == len(units) == len(ch_names) == n_ch - - # read datatype - byteorder = ( - fname_hdr.read_text().split("DataByteOrder")[1].strip().split(" ")[1] - ) - is_ascii = "ASCII" in byteorder - - else: - raise NotImplementedError - - # finetune channel types (e.g. stim, eog etc might be identified by name) - # TODO? - - # scale data to SI units - orig_units = dict(zip(ch_names, units)) - cals = [ - 1.0 / 1e15 if (u == "fT") else 1.0 / 1e6 if (u == "uV") else 1.0 - for u in units - ] - if isinstance(preload, np.ndarray): - for i_ch, unit in enumerate(units): - if unit == "fT": # femtoTesla - preload[i_ch, :] /= 1e15 - elif unit == "uV": # microVolt - preload[i_ch, :] /= 1e6 - else: # leave as is - pass + fname = _check_curry_filename(fname) + + ( + sfreq, + n_samples, + ch_names, + ch_types, + ch_pos, + landmarks, + landmarkslabels, + hpimatrix, + events, + orig_format, + orig_units, + is_ascii, + cals, + preload, + meas_date, + ) = _extract_curry_info(fname, preload) # construct info info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) - last_samps = [n_samples - 1] - raw_extras = dict(is_ascii=is_ascii) # create raw object + last_samps = [n_samples - 1] + raw_extras = dict(is_ascii=is_ascii) super().__init__( info, preload, @@ -187,7 +398,14 @@ def __init__(self, fname, preload=False, verbose=None): orig_units=orig_units, verbose=verbose, ) + + # set meas_date + self.set_meas_date(meas_date) + + # scale data to SI units self._cals = np.array(cals) + if isinstance(preload, np.ndarray): + self._rescale_curry_data() # set events / annotations # format from curryreader: sample, etype, startsample, endsample @@ -200,8 +418,12 @@ def __init__(self, fname, preload=False, verbose=None): self.set_annotations(annot) # make montage - mont = _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels) + mont = _make_curry_montage( + ch_names, ch_types, ch_pos, landmarks, landmarkslabels + ) self.set_montage(mont, on_missing="ignore") + # with self.info._unlock(): + # self.info['dig'] = mont.dig # add HPI data (if present) # TODO @@ -210,7 +432,7 @@ def __init__(self, fname, preload=False, verbose=None): def _rescale_curry_data(self): orig_units = self._orig_units - for i_ch, unit in enumerate(orig_units): + for i_ch, unit in enumerate(orig_units.values()): if unit == "fT": # femtoTesla self._data[i_ch, :] /= 1e15 elif unit == "µV": # microVolt @@ -234,46 +456,8 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): ) 
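+# usage sketch for the reader above (file path hypothetical; assumes the
+# events were attached as annotations in __init__, as done a few lines up):
+#
+#     from mne import events_from_annotations
+#     from mne.io.curry import read_raw_curry
+#
+#     raw = read_raw_curry("recording.cdt", preload=True)
+#     events, event_id = events_from_annotations(raw)
+#
+# events_from_annotations rebuilds an (n_events, 3) array from the
+# annotations, mirroring the "sample, etype" columns of the curryreader
+# event table.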
-def _make_curry_montage(ch_names, ch_pos, landmarks, landmarkslabels): - ch_pos_dict = dict(zip(ch_names, ch_pos)) - landmark_dict = dict(zip(landmarkslabels, landmarks)) - for k in ["Nas", "RPA", "LPA"]: - if k not in landmark_dict.keys(): - landmark_dict[k] = None - if len(landmarkslabels) > 0: - hpi_pos = landmarks[ - [i for i, n in enumerate(landmarkslabels) if re.match("HPI[1-99]", n)], : - ] - else: - hpi_pos = None - if len(landmarkslabels) > 0: - hsp_pos = landmarks[ - [i for i, n in enumerate(landmarkslabels) if re.match("H[1-99]", n)], : - ] - else: - hsp_pos = None - - mont = None - if ch_pos.shape[1] == 3: # eeg xyz space - mont = make_dig_montage( - ch_pos=ch_pos_dict, - nasion=landmark_dict["Nas"], - lpa=landmark_dict["LPA"], - rpa=landmark_dict["RPA"], - hsp=hsp_pos, - hpi=hpi_pos, - coord_frame="unknown", - ) - elif ch_pos.shape[1] == 6: # meg? - # TODO - pass - else: # not recorded? - pass - - return mont - - -def read_impedances_curry(fname): +@verbose +def read_impedances_curry(fname, verbose=None): """Read impedance measurements from Curry files. Parameters @@ -281,6 +465,7 @@ def read_impedances_curry(fname): fname : path-like Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. + %(verbose)s Returns ------- @@ -291,10 +476,8 @@ def read_impedances_curry(fname): """ # use curry-python-reader to load data - try: - currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) - except Exception as e: - raise ValueError(f"file could not be read - {e}") + fname = _check_curry_filename(fname) + currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) impedances = currydata["impedances"] ch_names = currydata["labels"] From d338280b3a04f3de8fb3021c0409514defd92732 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Fri, 9 May 2025 18:30:46 +0100 Subject: [PATCH 08/27] add high level unit test --- mne/io/curry/tests/test_curry.py | 567 ++----------------------------- 1 file changed, 35 insertions(+), 532 deletions(-) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 79de0088e8f..7a2beebcec2 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -3,31 +3,15 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. 
-from datetime import datetime, timezone -from pathlib import Path -from shutil import copyfile -import numpy as np import pytest +from numpy import empty from numpy.testing import assert_allclose, assert_array_equal -from mne._fiff.constants import FIFF -from mne._fiff.tag import _loc_to_coil_trans -from mne.annotations import events_from_annotations, read_annotations -from mne.bem import _fit_sphere from mne.datasets import testing -from mne.event import find_events -from mne.io.bti import read_raw_bti -from mne.io.curry import read_raw_curry -from mne.io.curry.curry import ( - FILE_EXTENSIONS, - _get_curry_file_structure, - _get_curry_version, - _read_events_curry, -) +from mne.io.curry import read_impedances_curry, read_raw_curry from mne.io.edf import read_raw_bdf from mne.io.tests.test_raw import _test_raw_reader -from mne.utils import catch_logging data_dir = testing.data_path(download=False) curry_dir = data_dir / "curry" @@ -59,32 +43,37 @@ def bdf_curry_ref(): pytest.param(curry8_bdf_file, 1e-7, id="curry 8"), pytest.param(curry7_bdf_ascii_file, 1e-4, id="curry 7 ascii"), pytest.param(curry8_bdf_ascii_file, 1e-4, id="curry 8 ascii"), + pytest.param(curry7_bdf_ascii_file, 1e-7, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, 1e-7, id="curry 8 ascii"), ], ) @pytest.mark.parametrize("preload", [True, False]) def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): """Test reading CURRY files.""" - raw = read_raw_curry(fname, preload=preload) + with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! + raw = read_raw_curry(fname, preload=preload) - assert hasattr(raw, "_data") == preload - assert raw.n_times == bdf_curry_ref.n_times - assert raw.info["sfreq"] == bdf_curry_ref.info["sfreq"] + assert hasattr(raw, "_data") == preload + assert raw.n_times == bdf_curry_ref.n_times + assert raw.info["sfreq"] == bdf_curry_ref.info["sfreq"] - for field in ["kind", "ch_name"]: - assert_array_equal( - [ch[field] for ch in raw.info["chs"]], - [ch[field] for ch in bdf_curry_ref.info["chs"]], - ) + for field in ["kind", "ch_name"]: + assert_array_equal( + [ch[field] for ch in raw.info["chs"]], + [ch[field] for ch in bdf_curry_ref.info["chs"]], + ) - assert_allclose(raw.get_data(verbose="error"), bdf_curry_ref.get_data(), atol=tol) + assert_allclose( + raw.get_data(verbose="error"), bdf_curry_ref.get_data(), atol=tol + ) - picks, start, stop = ["C3", "C4"], 200, 800 - assert_allclose( - raw.get_data(picks=picks, start=start, stop=stop, verbose="error"), - bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), - rtol=tol, - ) - assert raw.info["dev_head_t"] is None + picks, start, stop = ["C3", "C4"], 200, 800 + assert_allclose( + raw.get_data(picks=picks, start=start, stop=stop, verbose="error"), + bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), + rtol=tol, + ) + # assert raw.info["dev_head_t"] is None # TODO do we need this value? 
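+    # a debugging aid when these comparisons fail (illustrative only):
+    # print(raw.info["chs"][0])  # kind / unit / loc of the first channel
+    # print(raw.annotations)  # events recovered from the Curry event table
+    # print(raw.get_data().std(axis=1))  # per-channel scale, to spot unit bugs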
@testing.requires_testing_data @@ -99,193 +88,8 @@ def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): ) def test_read_raw_curry_test_raw(fname): """Test read_raw_curry with _test_raw_reader.""" - _test_raw_reader(read_raw_curry, fname=fname) - - -# These values taken from a different recording but allow us to test -# using our existing filres - -HPI_CONTENT = """\ -FileVersion: 804 -NumCoils: 10 - -0 1 -50.67 50.98 133.15 0.006406 1 46.45 51.51 143.15 0.006789 1 39.38 -26.67 155.51 0.008034 1 -36.72 -39.95 142.83 0.007700 1 1.61 16.95 172.76 0.001788 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 0 0.00 0.00 0.00 0.000000 -""" # noqa: E501 - - -LM_CONTENT = """ - -LANDMARKS_MAG1 START - ListDescription = functional landmark positions - ListUnits = mm - ListNrColumns = 3 - ListNrRows = 8 - ListNrTimepts = 1 - ListNrBlocks = 1 - ListBinary = 0 - ListType = 1 - ListTrafoType = 1 - ListGridType = 2 - ListFirstColumn = 1 - ListIndexMin = -1 - ListIndexMax = -1 - ListIndexAbsMax = -1 -LANDMARKS_MAG1 END - -LANDMARKS_MAG1 START_LIST # Do not edit! - 75.4535 5.32907e-15 2.91434e-16 - 1.42109e-14 -75.3212 9.71445e-16 - -74.4568 -1.42109e-14 2.51188e-15 - -59.7558 35.5804 66.822 - 43.15 43.4107 78.0027 - 38.8415 -41.1884 81.9941 - -36.683 -59.5119 66.4338 - -1.07259 -1.88025 103.747 -LANDMARKS_MAG1 END_LIST - -LM_INDICES_MAG1 START - ListDescription = functional landmark PAN info - ListUnits = - ListNrColumns = 1 - ListNrRows = 3 - ListNrTimepts = 1 - ListNrBlocks = 1 - ListBinary = 0 - ListType = 0 - ListTrafoType = 0 - ListGridType = 2 - ListFirstColumn = 1 - ListIndexMin = -1 - ListIndexMax = -1 - ListIndexAbsMax = -1 -LM_INDICES_MAG1 END - -LM_INDICES_MAG1 START_LIST # Do not edit! - 2 - 1 - 3 -LM_INDICES_MAG1 END_LIST - -LM_REMARKS_MAG1 START - ListDescription = functional landmark labels - ListUnits = - ListNrColumns = 40 - ListNrRows = 8 - ListNrTimepts = 1 - ListNrBlocks = 1 - ListBinary = 0 - ListType = 5 - ListTrafoType = 0 - ListGridType = 2 - ListFirstColumn = 1 - ListIndexMin = -1 - ListIndexMax = -1 - ListIndexAbsMax = -1 -LM_REMARKS_MAG1 END - -LM_REMARKS_MAG1 START_LIST # Do not edit! 
-Left ear -Nasion -Right ear -HPI1 -HPI2 -HPI3 -HPI4 -HPI5 -LM_REMARKS_MAG1 END_LIST - -""" - -WANT_TRANS = np.array( - [ - [0.99729224, -0.07353067, -0.00119791, 0.00126953], - [0.07319243, 0.99085848, 0.11332405, 0.02670814], - [-0.00714583, -0.11310488, 0.99355736, 0.04721836], - [0.0, 0.0, 0.0, 1.0], - ] -) - - -@testing.requires_testing_data -@pytest.mark.parametrize( - "fname,tol", - [ - pytest.param(curry7_rfDC_file, 1e-6, id="curry 7"), - pytest.param(curry8_rfDC_file, 1e-3, id="curry 8"), - ], -) -@pytest.mark.parametrize("mock_dev_head_t", [True, False]) -def test_read_raw_curry_rfDC(fname, tol, mock_dev_head_t, tmp_path): - """Test reading CURRY files.""" - if mock_dev_head_t: - if "Curry 7" in fname.name: # not supported yet - return - # copy files to tmp_path - for ext in (".cdt", ".cdt.dpa"): - src = fname.with_suffix(ext) - dst = tmp_path / fname.with_suffix(ext).name - copyfile(src, dst) - if ext == ".cdt.dpa": - with open(dst, "a") as fid: - fid.write(LM_CONTENT) - fname = tmp_path / fname.name - with open(fname.with_suffix(fname.suffix + ".hpi"), "w") as fid: - fid.write(HPI_CONTENT) - - # check data - bti_rfDC = read_raw_bti(pdf_fname=bti_rfDC_file, head_shape_fname=None) - with catch_logging() as log: - raw = read_raw_curry(fname, verbose=True) - log = log.getvalue() - if mock_dev_head_t: - assert "Composing device" in log - else: - assert "Leaving device" in log - assert "no landmark" in log - - # test on the eeg chans, since these were not renamed by curry - eeg_names = [ - ch["ch_name"] for ch in raw.info["chs"] if ch["kind"] == FIFF.FIFFV_EEG_CH - ] - - assert_allclose(raw.get_data(eeg_names), bti_rfDC.get_data(eeg_names), rtol=tol) - assert bti_rfDC.info["dev_head_t"] is not None # XXX probably a BTI bug - if mock_dev_head_t: - assert raw.info["dev_head_t"] is not None - assert_allclose(raw.info["dev_head_t"]["trans"], WANT_TRANS, atol=1e-5) - else: - assert raw.info["dev_head_t"] is None - - # check that most MEG sensors are approximately oriented outward from - # the device origin - n_meg = n_eeg = n_other = 0 - pos = list() - nn = list() - for ch in raw.info["chs"]: - if ch["kind"] == FIFF.FIFFV_MEG_CH: - assert ch["coil_type"] == FIFF.FIFFV_COIL_CTF_GRAD - t = _loc_to_coil_trans(ch["loc"]) - pos.append(t[:3, 3]) - nn.append(t[:3, 2]) - assert_allclose(np.linalg.norm(nn[-1]), 1.0) - n_meg += 1 - elif ch["kind"] == FIFF.FIFFV_EEG_CH: - assert ch["coil_type"] == FIFF.FIFFV_COIL_EEG - n_eeg += 1 - else: - assert ch["coil_type"] == FIFF.FIFFV_COIL_NONE - n_other += 1 - assert n_meg == 148 - assert n_eeg == 31 - assert n_other == 15 - pos = np.array(pos) - nn = np.array(nn) - rad, origin = _fit_sphere(pos) - assert 0.11 < rad < 0.13 - pos -= origin - pos /= np.linalg.norm(pos, axis=1, keepdims=True) - angles = np.abs(np.rad2deg(np.arccos((pos * nn).sum(-1)))) - assert (angles < 20).sum() > 100 + with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! 
+ _test_raw_reader(read_raw_curry, fname=fname) @testing.requires_testing_data @@ -294,316 +98,15 @@ def test_read_raw_curry_rfDC(fname, tol, mock_dev_head_t, tmp_path): [ pytest.param(curry7_bdf_file, id="curry 7"), pytest.param(curry8_bdf_file, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), ], ) -def test_read_events_curry_are_same_as_bdf(fname): - """Test events from curry annotations recovers the right events.""" - EVENT_ID = {str(ii): ii for ii in range(5)} - REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) - - raw = read_raw_curry(fname) - events, _ = events_from_annotations(raw, event_id=EVENT_ID) - assert_allclose(events, REF_EVENTS) - assert raw.info["dev_head_t"] is None - - -@testing.requires_testing_data -def test_check_missing_files(): - """Test checking for missing curry files (smoke test).""" - invalid_fname = "/invalid/path/name.xy" - - with pytest.raises(OSError, match="file type .*? must end with"): - _read_events_curry(invalid_fname) - - with pytest.raises(FileNotFoundError, match="does not exist"): - _get_curry_file_structure(invalid_fname) - - with pytest.raises(FileNotFoundError, match="files cannot be found"): - _get_curry_file_structure(missing_event_file, required=["info", "events"]) - - -def _mock_info_file(src, dst, sfreq, time_step): - with open(src) as in_file, open(dst, "w") as out_file: - for line in in_file: - if "SampleFreqHz" in line: - out_file.write(line.replace("500", str(sfreq))) - elif "SampleTimeUsec" in line: - out_file.write(line.replace("2000", str(time_step))) - else: - out_file.write(line) - - -@pytest.fixture( - params=[ - pytest.param(dict(sfreq=500, time_step=0), id="correct sfreq"), - pytest.param(dict(sfreq=0, time_step=2000), id="correct time_step"), - pytest.param(dict(sfreq=500, time_step=2000), id="both correct"), - pytest.param( - dict(sfreq=0, time_step=0), - id="both 0", - marks=pytest.mark.xfail(raises=ValueError), - ), - pytest.param( - dict(sfreq=500, time_step=42), - id="mismatch", - marks=pytest.mark.xfail(raises=ValueError), - ), - ] -) -def sfreq_testing_data(tmp_path, request): - """Generate different sfreq, time_step scenarios to be tested.""" - sfreq, time_step = request.param["sfreq"], request.param["time_step"] - - # create dummy empty files for 'dat' and 'rs3' - for fname in ["curry.dat", "curry.rs3"]: - open(tmp_path / fname, "a").close() - - _mock_info_file( - src=curry7_bdf_file.with_suffix(".dap"), - dst=tmp_path / "curry.dap", - sfreq=sfreq, - time_step=time_step, - ) - _mock_info_file( - src=curry7_bdf_file.with_suffix(".rs3"), - dst=tmp_path / "curry.rs3", - sfreq=sfreq, - time_step=time_step, - ) - return tmp_path / "curry.dat" - - -@testing.requires_testing_data -def test_sfreq(sfreq_testing_data): - """Test sfreq and time_step.""" - raw = read_raw_curry(sfreq_testing_data, preload=False) - assert raw.info["sfreq"] == 500 - - -@testing.requires_testing_data -@pytest.mark.parametrize( - "fname", - [ - pytest.param(curry_dir / "test_bdf_stim_channel Curry 7.cef", id="7"), - pytest.param(curry_dir / "test_bdf_stim_channel Curry 8.cdt.cef", id="8"), - pytest.param( - curry_dir / "test_bdf_stim_channel Curry 7 ASCII.cef", id="7 ascii" - ), - pytest.param( - curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt.cef", id="8 ascii" - ), - ], -) -def test_read_curry_annotations(fname): - """Test reading for Curry events file.""" - EXPECTED_ONSET = [ - 0.484, - 0.486, - 0.62, - 0.622, - 1.904, - 1.906, - 3.212, - 3.214, - 
4.498, - 4.5, - 5.8, - 5.802, - 7.074, - 7.076, - 8.324, - 8.326, - 9.58, - 9.582, - ] - EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) - EXPECTED_DESCRIPTION = [ - "4", - "50000", - "2", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - ] - - annot = read_annotations(fname, sfreq="auto") - - assert annot.orig_time is None - assert_array_equal(annot.onset, EXPECTED_ONSET) - assert_array_equal(annot.duration, EXPECTED_DURATION) - assert_array_equal(annot.description, EXPECTED_DESCRIPTION) - - -def _get_read_annotations_mock_info(name_part, mock_dir): - original, modified = dict(), dict() - - original["event"] = curry_dir / ("test_bdf_stim_channel " + name_part) - original["base"], ext = str(original["event"]).split(".", maxsplit=1) - original["base"] = Path(original["base"]) - version = _get_curry_version(ext) - original["info"] = original["base"].with_suffix(FILE_EXTENSIONS[version]["info"]) - - modified["base"] = mock_dir / "curry" - modified["event"] = modified["base"].with_suffix( - FILE_EXTENSIONS[version]["events_cef"] +def test_read_impedances_curry(fname): + """Test reading impedances from CURRY files.""" + _, imp = read_impedances_curry(fname) + actual_imp = empty(shape=(0, 3)) + assert_allclose( + imp, + actual_imp, ) - modified["info"] = modified["base"].with_suffix(FILE_EXTENSIONS[version]["info"]) - - return original, modified - - -@testing.requires_testing_data -@pytest.mark.parametrize( - "name_part", - [ - pytest.param("7.cef", id="7"), - pytest.param("8.cdt.cef", id="8"), - pytest.param("7 ASCII.cef", id="7 (ascii)"), - pytest.param("8 ASCII.cdt.cef", id="8 (ascii)"), - ], -) -def test_read_curry_annotations_using_mocked_info(tmp_path, name_part): - """Test reading for Curry events file.""" - EXPECTED_ONSET = [ - 0.484, - 0.486, - 0.62, - 0.622, - 1.904, - 1.906, - 3.212, - 3.214, - 4.498, - 4.5, - 5.8, - 5.802, - 7.074, - 7.076, - 8.324, - 8.326, - 9.58, - 9.582, - ] - EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) - EXPECTED_DESCRIPTION = [ - "4", - "50000", - "2", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - "1", - "50000", - ] - - original, fname = _get_read_annotations_mock_info("Curry " + name_part, tmp_path) - copyfile(src=original["event"], dst=fname["event"]) - - _msg = "required files cannot be found" - with pytest.raises(FileNotFoundError, match=_msg): - read_annotations(fname["event"], sfreq="auto") - - _mock_info_file(src=original["info"], dst=fname["info"], sfreq=0, time_step=2000) - - annot = read_annotations(fname["event"], sfreq="auto") - - assert annot.orig_time is None - assert_array_equal(annot.onset, EXPECTED_ONSET) - assert_array_equal(annot.duration, EXPECTED_DURATION) - assert_array_equal(annot.description, EXPECTED_DESCRIPTION) - - -@testing.requires_testing_data -@pytest.mark.parametrize( - "fname,expected_channel_list", - [ - pytest.param( - Ref_chan_omitted_file, - ["FP1", "FPZ", "FP2", "VEO", "EKG", "Trigger"], - id="Ref omitted, normal order", - ), - pytest.param( - Ref_chan_omitted_reordered_file, - ["FP2", "FPZ", "FP1", "VEO", "EKG", "Trigger"], - id="Ref omitted, reordered", - ), - ], -) -def test_read_files_missing_channel(fname, expected_channel_list): - """Test reading data files that has an omitted channel.""" - # This for Git issue #8391. 
In some cases, the 'labels' (.rs3 file will - # list channels that are not actually saved in the datafile (such as the - # 'Ref' channel). These channels are denoted in the 'info' (.dap) file - # in the CHAN_IN_FILE section with a '0' as their index. - # If the CHAN_IN_FILE section is present, the code also assures that the - # channels are sorted in the prescribed order. - # This test makes sure the data load correctly, and that we end up with - # the proper channel list. - raw = read_raw_curry(fname, preload=True) - assert raw.ch_names == expected_channel_list - - -@testing.requires_testing_data -@pytest.mark.parametrize( - "fname,expected_meas_date", - [ - pytest.param( - Ref_chan_omitted_file, - datetime(2018, 11, 21, 12, 53, 48, 525000, tzinfo=timezone.utc), - id="valid start date", - ), - pytest.param(curry7_rfDC_file, None, id="start date year is 0"), - pytest.param(curry7_bdf_file, None, id="start date seconds invalid"), - ], -) -def test_meas_date(fname, expected_meas_date): - """Test reading acquisition start datetime info info['meas_date'].""" - # This for Git issue #8398. The 'info' (.dap) file includes acquisition - # start date & time. Test that this goes into raw.info['meas_date']. - # If the information is not valid, raw.info['meas_date'] should be None - raw = read_raw_curry(fname, preload=False) - assert raw.info["meas_date"] == expected_meas_date - - -@testing.requires_testing_data -@pytest.mark.parametrize( - "fname, others", - [ - pytest.param(curry7_rfDC_file, (".dap", ".rs3"), id="curry7"), - pytest.param(curry8_rfDC_file, (".cdt.dpa",), id="curry8"), - ], -) -def test_dot_names(fname, others, tmp_path): - """Test that dots are parsed properly (e.g., in paths).""" - my_path = tmp_path / "dot.dot.dot" - my_path.mkdir() - my_path = my_path / Path(fname).parts[-1] - fname = Path(fname) - copyfile(fname, my_path) - for ext in others: - this_fname = fname.with_suffix(ext) - to_fname = my_path.with_suffix(ext) - copyfile(this_fname, to_fname) - read_raw_curry(my_path) From e3b91d8ec0186202d654f38944e0e738cd9a7cf3 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Tue, 13 May 2025 22:06:28 +0100 Subject: [PATCH 09/27] CI: style + curryreader version --- environment.yml | 2 +- mne/io/curry/__init__.py | 1 + mne/io/curry/tests/test_curry.py | 8 -------- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/environment.yml b/environment.yml index 74e525dd1f7..a518180ec7c 100644 --- a/environment.yml +++ b/environment.yml @@ -5,7 +5,7 @@ channels: dependencies: - python >=3.10 - antio >=0.5.0 - - curryreader >=0.0.1 + - curryreader >=0.1.1 - darkdetect - decorator - defusedxml diff --git a/mne/io/curry/__init__.py b/mne/io/curry/__init__.py index fce6b7d9a32..5b2e89b6798 100644 --- a/mne/io/curry/__init__.py +++ b/mne/io/curry/__init__.py @@ -5,3 +5,4 @@ # Copyright the MNE-Python contributors. 
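+# usage sketch for the re-exported readers (path hypothetical; the
+# (ch_names, impedances) return order follows test_read_impedances_curry
+# above):
+#
+#     from mne.io.curry import read_raw_curry, read_impedances_curry
+#     raw = read_raw_curry("recording.cdt", preload=True)
+#     ch_names, imp = read_impedances_curry("recording.cdt")
+#     # imp: up to 10 impedance measurements per recorded channel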
from .curry import read_raw_curry +from .curry import read_impedances_curry diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 7a2beebcec2..ded101193ab 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -16,16 +16,10 @@ data_dir = testing.data_path(download=False) curry_dir = data_dir / "curry" bdf_file = data_dir / "BDF" / "test_bdf_stim_channel.bdf" -bti_rfDC_file = data_dir / "BTi" / "erm_HFH" / "c,rfDC" -curry7_rfDC_file = curry_dir / "c,rfDC Curry 7.dat" -curry8_rfDC_file = curry_dir / "c,rfDC Curry 8.cdt" curry7_bdf_file = curry_dir / "test_bdf_stim_channel Curry 7.dat" curry7_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 7 ASCII.dat" curry8_bdf_file = curry_dir / "test_bdf_stim_channel Curry 8.cdt" curry8_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt" -missing_event_file = curry_dir / "test_sfreq_0.dat" -Ref_chan_omitted_file = curry_dir / "Ref_channel_omitted Curry7.dat" -Ref_chan_omitted_reordered_file = curry_dir / "Ref_channel_omitted reordered Curry7.dat" @pytest.fixture(scope="session") @@ -41,8 +35,6 @@ def bdf_curry_ref(): [ pytest.param(curry7_bdf_file, 1e-7, id="curry 7"), pytest.param(curry8_bdf_file, 1e-7, id="curry 8"), - pytest.param(curry7_bdf_ascii_file, 1e-4, id="curry 7 ascii"), - pytest.param(curry8_bdf_ascii_file, 1e-4, id="curry 8 ascii"), pytest.param(curry7_bdf_ascii_file, 1e-7, id="curry 7 ascii"), pytest.param(curry8_bdf_ascii_file, 1e-7, id="curry 8 ascii"), ], From 7e8f1cc2da0bf60d38a205069e29c3dbbd6fbdff Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 13 May 2025 21:06:53 +0000 Subject: [PATCH 10/27] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index a518180ec7c..74e525dd1f7 100644 --- a/environment.yml +++ b/environment.yml @@ -5,7 +5,7 @@ channels: dependencies: - python >=3.10 - antio >=0.5.0 - - curryreader >=0.1.1 + - curryreader >=0.0.1 - darkdetect - decorator - defusedxml From 8ec4a17961ce7c0ec09e385cd6d3715874ada965 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Tue, 13 May 2025 22:11:41 +0100 Subject: [PATCH 11/27] bump curryreader version, second shot --- environment.yml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment.yml b/environment.yml index 74e525dd1f7..a518180ec7c 100644 --- a/environment.yml +++ b/environment.yml @@ -5,7 +5,7 @@ channels: dependencies: - python >=3.10 - antio >=0.5.0 - - curryreader >=0.0.1 + - curryreader >=0.1.1 - darkdetect - decorator - defusedxml diff --git a/pyproject.toml b/pyproject.toml index 25c2ea213b9..443737a766e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -88,7 +88,7 @@ full = ["mne[full-no-qt]", "PyQt6 != 6.6.0", "PyQt6-Qt6 != 6.6.0, != 6.7.0"] # and mne[full-pyside6], which will install PySide6 instead of PyQt6. 
full-no-qt = [
     "antio >= 0.5.0",
-    "curryreader >= 0.0.1",
+    "curryreader >= 0.1.1",
     "darkdetect",
     "defusedxml",
     "dipy",

From 7eea5aa3660e22283b604e0817906d8b551edea4 Mon Sep 17 00:00:00 2001
From: dominikwelke
Date: Wed, 14 May 2025 12:27:52 +0100
Subject: [PATCH 12/27] test fixes

---
 mne/io/curry/curry.py | 43 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 42 insertions(+), 1 deletion(-)

diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py
index 0f8f8a4a23b..3514adf6e10 100644
--- a/mne/io/curry/curry.py
+++ b/mne/io/curry/curry.py
@@ -233,6 +233,47 @@
     )
 
 
+def _read_annotations_curry(fname, sfreq="auto"):
+    r"""Read events from Curry event files.
+
+    Parameters
+    ----------
+    fname : str
+        The filename.
+    sfreq : float | 'auto'
+        The sampling frequency in the file. If set to 'auto' then the
+        ``sfreq`` is taken from the fileheader.
+
+    Returns
+    -------
+    annot : instance of Annotations | None
+        The annotations.
+    """
+    fname = _check_curry_filename(fname)
+
+    (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _, _, _, _) = (
+        _extract_curry_info(fname, preload=False)
+    )
+    if sfreq == "auto":
+        sfreq = sfreq_fromfile
+    elif np.isreal(sfreq):
+        if float(sfreq) != float(sfreq_fromfile):
+            warn(
+                f"provided sfreq ({sfreq} Hz) does not match freq from fileheader "
+                f"({sfreq_fromfile} Hz)!"
+            )
+    else:
+        raise ValueError("'sfreq' must be numeric or 'auto'")
+
+    if isinstance(events, np.ndarray):  # if there are events
+        events = events.astype("int")
+        events = np.insert(events, 1, np.diff(events[:, 2:]).flatten(), axis=1)[:, :3]
+        return annotations_from_events(events, sfreq)
+    else:
+        warn("no event annotations found")
+        return None
+
+
 def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels):
     # scale ch_pos to m?!
     ch_pos /= 1000.0
@@ -307,7 +348,7 @@ def read_raw_curry(
         ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``.
     import_epochs_as_events : bool
         Set to ``True`` if you want to import epoched recordings as continuous ``raw``
-        object with event annotations. Only do this if you know your data allows this!
+        object with event annotations. Only do this if you know your data allows it.
%(preload)s %(verbose)s From 5afa6045e45ca03fe473de19fa3076a3f47c316b Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 14 May 2025 12:54:58 +0100 Subject: [PATCH 13/27] add curryreader to test_extra dependencies --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 443737a766e..ac74d949e1b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -158,6 +158,7 @@ test = [ # Dependencies for being able to run additional tests (rare/CIs/advanced devs) # Changes here should be reflected in the mne/utils/config.py dev dependencies section test_extra = [ + "curryreader >= 0.1.1", "edfio >= 0.2.1", "eeglabio", "imageio >= 2.6.1", From 07d42565f7ebf45ecc0106c1ab2d41629b0ecf82 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 14 May 2025 14:22:35 +0100 Subject: [PATCH 14/27] try fixing tests --- mne/io/curry/curry.py | 36 ++++++-------------------------- mne/io/curry/tests/test_curry.py | 4 ++-- mne/utils/config.py | 1 + 3 files changed, 9 insertions(+), 32 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 3514adf6e10..f1e8288532f 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -98,7 +98,7 @@ def _get_curry_epoch_info(fname): ) -def _extract_curry_info(fname, preload): +def _extract_curry_info(fname): # use curry-python-reader currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) @@ -123,10 +123,6 @@ def _extract_curry_info(fname, preload): # data orig_format = "single" # curryreader.py always reads float32. is this correct? - if isinstance(preload, bool | np.bool_) and preload: - preload = currydata["data"].T.astype( - "float64" - ) # curryreader returns float32, but mne seems to need float64 # events events = currydata["events"] @@ -205,14 +201,6 @@ def _extract_curry_info(fname, preload): cals = [ 1.0 / 1e15 if (u == "fT") else 1.0 / 1e6 if (u == "uV") else 1.0 for u in units ] - # if isinstance(preload, np.ndarray): - # for i_ch, unit in enumerate(units): - # if unit == "fT": # femtoTesla - # preload[i_ch, :] /= 1e15 - # elif unit == "uV": # microVolt - # preload[i_ch, :] /= 1e6 - # else: # leave as is - # pass return ( sfreq, @@ -228,7 +216,6 @@ def _extract_curry_info(fname, preload): orig_units, is_ascii, cals, - preload, meas_date, ) @@ -252,7 +239,7 @@ def _read_annotations_curry(fname, sfreq="auto"): fname = _check_curry_filename(fname) (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _, _, _, _) = ( - _extract_curry_info(fname, preload=False) + _extract_curry_info(fname) ) if sfreq == "auto": sfreq = sfreq_fromfile @@ -419,9 +406,8 @@ def __init__(self, fname, preload=False, verbose=None): orig_units, is_ascii, cals, - preload, meas_date, - ) = _extract_curry_info(fname, preload) + ) = _extract_curry_info(fname) # construct info info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) @@ -431,7 +417,7 @@ def __init__(self, fname, preload=False, verbose=None): raw_extras = dict(is_ascii=is_ascii) super().__init__( info, - preload, + preload=False, filenames=[fname], last_samps=last_samps, orig_format=orig_format, @@ -445,8 +431,8 @@ def __init__(self, fname, preload=False, verbose=None): # scale data to SI units self._cals = np.array(cals) - if isinstance(preload, np.ndarray): - self._rescale_curry_data() + if isinstance(preload, bool | np.bool_) and preload: + self.load_data() # set events / annotations # format from curryreader: sample, etype, startsample, endsample @@ -471,16 +457,6 @@ def __init__(self, fname, preload=False, verbose=None): if not 
isinstance(hpimatrix, list): warn("HPI data found, but reader not implemented.") - def _rescale_curry_data(self): - orig_units = self._orig_units - for i_ch, unit in enumerate(orig_units.values()): - if unit == "fT": # femtoTesla - self._data[i_ch, :] /= 1e15 - elif unit == "µV": # microVolt - self._data[i_ch, :] /= 1e6 - else: # leave as is - pass - def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" if self._raw_extras[fi]["is_ascii"]: diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index ded101193ab..b91f1edbb4b 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -35,8 +35,8 @@ def bdf_curry_ref(): [ pytest.param(curry7_bdf_file, 1e-7, id="curry 7"), pytest.param(curry8_bdf_file, 1e-7, id="curry 8"), - pytest.param(curry7_bdf_ascii_file, 1e-7, id="curry 7 ascii"), - pytest.param(curry8_bdf_ascii_file, 1e-7, id="curry 8 ascii"), + pytest.param(curry7_bdf_ascii_file, 1e-4, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, 1e-4, id="curry 8 ascii"), ], ) @pytest.mark.parametrize("preload", [True, False]) diff --git a/mne/utils/config.py b/mne/utils/config.py index c28373fcb93..29f09da7b23 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -762,6 +762,7 @@ def sys_info( "neo", "eeglabio", "edfio", + "curryreader", "mffpy", "pybv", "", From e4ae65005923df767969d40a776a56d8485d31de Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Fri, 16 May 2025 00:04:00 +0100 Subject: [PATCH 15/27] again, try fixing tests --- mne/io/curry/tests/test_curry.py | 2 ++ tools/github_actions_dependencies.sh | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index b91f1edbb4b..5eee6e37e17 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -13,6 +13,8 @@ from mne.io.edf import read_raw_bdf from mne.io.tests.test_raw import _test_raw_reader +pytest.importorskip("curryreader") + data_dir = testing.data_path(download=False) curry_dir = data_dir / "curry" bdf_file = data_dir / "BDF" / "test_bdf_stim_channel.bdf" diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index d47d9070f8b..a18fe8f4b1f 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -23,7 +23,7 @@ if [ ! 
-z "$CONDA_ENV" ]; then elif [[ "${MNE_CI_KIND}" == "pip" ]]; then # Only used for 3.13 at the moment, just get test deps plus a few extras # that we know are available - INSTALL_ARGS="nibabel scikit-learn numpydoc PySide6 mne-qt-browser pandas h5io mffpy defusedxml numba" + INSTALL_ARGS="nibabel scikit-learn numpydoc PySide6 mne-qt-browser pandas h5io mffpy defusedxml numba curryreader" INSTALL_KIND="test" else test "${MNE_CI_KIND}" == "pip-pre" From b005c5764ba8b4bf15af2d5050d2180a7e85f5a5 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Sat, 17 May 2025 00:17:53 +0100 Subject: [PATCH 16/27] _soft_import + nesting --- mne/io/curry/curry.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index f1e8288532f..3634806e3c0 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -7,7 +7,6 @@ from datetime import datetime, timezone from pathlib import Path -import curryreader import numpy as np import pandas as pd @@ -17,7 +16,7 @@ from ...annotations import annotations_from_events from ...channels import make_dig_montage from ...epochs import Epochs -from ...utils import verbose, warn +from ...utils import _soft_import, verbose, warn from ..base import BaseRaw CURRY_SUFFIX_DATA = [".cdt", ".dat"] @@ -57,6 +56,10 @@ def _check_curry_header_filename(fname): def _get_curry_recording_type(fname): + _soft_import("curryreader", "read recording modality") + + import curryreader + epochinfo = curryreader.read(str(fname), plotdata=0, verbosity=1)["epochinfo"] if epochinfo.size == 0: return "raw" @@ -69,6 +72,10 @@ def _get_curry_recording_type(fname): def _get_curry_epoch_info(fname): + _soft_import("curryreader", "read epoch info") + + import curryreader + # use curry-python-reader currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) @@ -99,6 +106,10 @@ def _get_curry_epoch_info(fname): def _extract_curry_info(fname): + _soft_import("curryreader", "read file header") + + import curryreader + # use curry-python-reader currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) @@ -492,6 +503,10 @@ def read_impedances_curry(fname, verbose=None): An array containing up to 10 impedance measurements for all recorded channels. 
""" + _soft_import("curryreader", "read impedances") + + import curryreader + # use curry-python-reader to load data fname = _check_curry_filename(fname) currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) From c75a8e4c8811f7d0485ad5d4e3ae5ab2cf04f7bb Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 21 May 2025 22:43:05 +0100 Subject: [PATCH 17/27] add back and adapt some more tests --- mne/io/curry/curry.py | 7 +- mne/io/curry/tests/test_curry.py | 127 +++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 3 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 3634806e3c0..8e871e628d8 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -41,14 +41,15 @@ def _check_curry_filename(fname): def _check_curry_header_filename(fname): + fname_in = Path(fname) fname_hdr = None # try suffixes for hdr_suff in CURRY_SUFFIX_HDR: - if fname.with_suffix(hdr_suff).exists(): - fname_hdr = fname.with_suffix(hdr_suff) + if fname_in.with_suffix(hdr_suff).exists(): + fname_hdr = fname_in.with_suffix(hdr_suff) break # final check - if not fname_hdr or not fname.exists(): + if not fname_hdr or not fname_in.exists(): raise FileNotFoundError( f"no corresponding header file found {CURRY_SUFFIX_HDR}" ) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 5eee6e37e17..d330d5fd8d3 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -4,12 +4,16 @@ # Copyright the MNE-Python contributors. +import numpy as np import pytest from numpy import empty from numpy.testing import assert_allclose, assert_array_equal +from mne.annotations import events_from_annotations, read_annotations from mne.datasets import testing +from mne.event import find_events from mne.io.curry import read_impedances_curry, read_raw_curry +from mne.io.curry.curry import _check_curry_filename, _check_curry_header_filename from mne.io.edf import read_raw_bdf from mne.io.tests.test_raw import _test_raw_reader @@ -22,6 +26,8 @@ curry7_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 7 ASCII.dat" curry8_bdf_file = curry_dir / "test_bdf_stim_channel Curry 8.cdt" curry8_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt" +Ref_chan_omitted_file = curry_dir / "Ref_channel_omitted Curry7.dat" +Ref_chan_omitted_reordered_file = curry_dir / "Ref_channel_omitted reordered Curry7.dat" @pytest.fixture(scope="session") @@ -86,6 +92,127 @@ def test_read_raw_curry_test_raw(fname): _test_raw_reader(read_raw_curry, fname=fname) +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="curry 7"), + pytest.param(curry8_bdf_file, id="curry 8"), + ], +) +def test_read_raw_curry_preload_equal(fname): + """Test raw identity with preload=True/False.""" + with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! + raw1 = read_raw_curry(fname, preload=False) + raw1.load_data() + assert raw1 == read_raw_curry(fname, preload=True) + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="curry 7"), + pytest.param(curry8_bdf_file, id="curry 8"), + ], +) +def test_read_events_curry_are_same_as_bdf(fname): + """Test events from curry annotations recovers the right events.""" + EVENT_ID = {str(ii): ii for ii in range(5)} + REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) + + with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! 
+ raw = read_raw_curry(fname) + events, _ = events_from_annotations(raw, event_id=EVENT_ID) + assert_allclose(events, REF_EVENTS) + assert raw.info["dev_head_t"] is None + + +@testing.requires_testing_data +def test_check_missing_files(): + """Test checking for missing curry files (smoke test).""" + invalid_fname = "/invalid/path/name.xy" + + with pytest.raises(FileNotFoundError, match="no curry data file"): + _check_curry_filename(invalid_fname) + + with pytest.raises(FileNotFoundError, match="no corresponding header"): + _check_curry_header_filename(invalid_fname) + + with pytest.raises(FileNotFoundError, match="no curry data file"): + read_raw_curry(invalid_fname) + + with pytest.raises(FileNotFoundError, match="no curry data file"): + read_impedances_curry(invalid_fname) + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry_dir / "test_bdf_stim_channel Curry 7.cef", id="7"), + pytest.param( + curry_dir / "test_bdf_stim_channel Curry 7 ASCII.cef", id="7 ascii" + ), + ], +) +def test_read_curry_annotations(fname): + """Test reading for Curry events file.""" + EXPECTED_ONSET = [ + 0.484, + 0.486, + 0.62, + 0.622, + 1.904, + 1.906, + 3.212, + 3.214, + 4.498, + 4.5, + 5.8, + 5.802, + 7.074, + 7.076, + 8.324, + 8.326, + 9.58, + 9.582, + ] + EXPECTED_DURATION = np.zeros_like(EXPECTED_ONSET) + EXPECTED_DESCRIPTION = [ + "4", + "50000", + "2", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + "1", + "50000", + ] + + annot = read_annotations(fname, sfreq="auto") + + assert annot.orig_time is None + assert_array_equal(annot.onset, EXPECTED_ONSET) + assert_array_equal(annot.duration, EXPECTED_DURATION) + assert_array_equal(annot.description, EXPECTED_DESCRIPTION) + + with pytest.raises(ValueError, match="must be numeric or 'auto'"): + _ = read_annotations(fname, sfreq="nonsense") + with pytest.warns(RuntimeWarning, match="does not match freq from fileheader"): + _ = read_annotations(fname, sfreq=12.0) + + @testing.requires_testing_data @pytest.mark.parametrize( "fname", From 99e968d39b60e015bef1208c0173e98765bc93a5 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 21 May 2025 22:48:40 +0100 Subject: [PATCH 18/27] add curryreader to circleci dependencies --- tools/circleci_dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/circleci_dependencies.sh b/tools/circleci_dependencies.sh index fe8116530ef..851263ccab8 100755 --- a/tools/circleci_dependencies.sh +++ b/tools/circleci_dependencies.sh @@ -9,7 +9,7 @@ python -m pip install --upgrade --progress-bar off \ "git+https://github.com/mne-tools/mne-bids.git" \ "git+https://github.com/mne-tools/mne-qt-browser.git" \ \ - alphaCSC autoreject bycycle conpy emd fooof meggie \ + alphaCSC autoreject bycycle conpy curryreader emd fooof meggie \ mne-ari mne-bids-pipeline mne-faster mne-features \ mne-icalabel mne-lsl mne-microstates mne-nirs mne-rsa \ neurodsp neurokit2 niseq nitime pactools mnelab \ From 036422585838f3a1d1caffc1a3fd9e43ed119742 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 21 May 2025 22:53:05 +0100 Subject: [PATCH 19/27] style --- mne/io/curry/tests/test_curry.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index d330d5fd8d3..b97e31b2492 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -26,8 +26,6 @@ curry7_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 7 
ASCII.dat" curry8_bdf_file = curry_dir / "test_bdf_stim_channel Curry 8.cdt" curry8_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt" -Ref_chan_omitted_file = curry_dir / "Ref_channel_omitted Curry7.dat" -Ref_chan_omitted_reordered_file = curry_dir / "Ref_channel_omitted reordered Curry7.dat" @pytest.fixture(scope="session") From e0c7eeeb201256b2f8ebd8065ab1db16b67ed8c6 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 2 Jul 2025 16:33:16 +0100 Subject: [PATCH 20/27] montage wip --- mne/io/curry/curry.py | 36 +++++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 8e871e628d8..f7818ec45c9 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -250,8 +250,8 @@ def _read_annotations_curry(fname, sfreq="auto"): """ fname = _check_curry_filename(fname) - (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _, _, _, _) = ( - _extract_curry_info(fname) + (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _, _, _) = _extract_curry_info( + fname ) if sfreq == "auto": sfreq = sfreq_fromfile @@ -457,17 +457,39 @@ def __init__(self, fname, preload=False, verbose=None): self.set_annotations(annot) # make montage - mont = _make_curry_montage( - ch_names, ch_types, ch_pos, landmarks, landmarkslabels - ) - self.set_montage(mont, on_missing="ignore") + self._set_curry_montage(ch_types, ch_pos, landmarks, landmarkslabels) + # with self.info._unlock(): # self.info['dig'] = mont.dig # add HPI data (if present) + # from curryreader docstring: + # "HPI-coil measurements matrix (Orion-MEG only) where every row is: + # [measurementsample, dipolefitflag, x, y, z, deviation]" + # that's incorrect, though. it seems to be: + # [sample, dipole_1, x_1,y_1, z_1, dev_1, ..., dipole_n, x_n, ...] + # for all n coils. # TODO if not isinstance(hpimatrix, list): - warn("HPI data found, but reader not implemented.") + warn("cHPI data found, but reader not implemented.") + hpisamples = hpimatrix[:, 0] + n_coil = int((hpimatrix.shape[1] - 1) / 5) + hpimatrix = hpimatrix[:, 1:].reshape(hpimatrix.shape[0], n_coil, 5) + print(f"found {len(hpisamples)} cHPI samples for {n_coil} coils") + + def _set_curry_montage(self, ch_types, ch_pos, landmarks, landmarkslabels): + assert len(self.info["ch_names"]) == len(ch_types) >= len(ch_pos) + + mont = _make_curry_montage( + self.info["ch_names"], ch_types, ch_pos, landmarks, landmarkslabels + ) + + # hack the montage in (for MEG chans) + # TODO change this! 
+        ch_types_tmp = [ct if ct != "mag" else "eeg" for ct in ch_types]
+        self.set_channel_types(dict(zip(self.info["ch_names"], ch_types_tmp)))
+        self.set_montage(mont, on_missing="ignore")
+        self.set_channel_types(dict(zip(self.info["ch_names"], ch_types)))
 
     def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
         """Read a chunk of raw data."""

From 1e0166722eb6c90886f21751879e17ce8cd62ed8 Mon Sep 17 00:00:00 2001
From: dominikwelke
Date: Wed, 2 Jul 2025 18:56:58 +0100
Subject: [PATCH 21/27] reintroduce test: test_read_files_missing_channel

---
 mne/io/curry/curry.py            | 54 ++++++++++++++++++++++++++++++++
 mne/io/curry/tests/test_curry.py | 46 +++++++++++++++++++++++++--
 2 files changed, 97 insertions(+), 3 deletions(-)

diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py
index f7818ec45c9..c19da804339 100644
--- a/mne/io/curry/curry.py
+++ b/mne/io/curry/curry.py
@@ -21,6 +21,7 @@
 
 CURRY_SUFFIX_DATA = [".cdt", ".dat"]
 CURRY_SUFFIX_HDR = [".cdt.dpa", ".cdt.dpo", ".dap"]
+CURRY_SUFFIX_LABELS = [".cdt.dpa", ".cdt.dpo", ".rs3"]
 
 
 def _check_curry_filename(fname):
@@ -56,6 +57,22 @@ def _check_curry_header_filename(fname):
     return fname_hdr
 
 
+def _check_curry_labels_filename(fname):
+    fname_in = Path(fname)
+    fname_labels = None
+    # try suffixes
+    for lbl_suff in CURRY_SUFFIX_LABELS:
+        if fname_in.with_suffix(lbl_suff).exists():
+            fname_labels = fname_in.with_suffix(lbl_suff)
+            break
+    # final check
+    if not fname_labels or not fname_in.exists():
+        raise FileNotFoundError(
+            f"no corresponding labels file found {CURRY_SUFFIX_LABELS}"
+        )
+    return fname_labels
+
+
 def _get_curry_recording_type(fname):
     _soft_import("curryreader", "read recording modality")
 
@@ -203,6 +220,43 @@ def _extract_curry_info(fname):
         # combine info
         ch_types += [ch_type] * n_ch_group
         units += [unit] * n_ch_group
+
+    # This is for Git issue #8391. In some cases, the 'labels' file (.rs3) will
+    # list channels that are not actually saved in the datafile (such as the
+    # 'Ref' channel). These channels are denoted in the 'info' (.dap) file
+    # in the CHAN_IN_FILE section with a '0' as their index.
+    #
+    # the current curryreader cannot cope with this - it assigns labels solely
+    # based on their order, which can be wrong. fix it here!
+    if not len(ch_types) == len(units) == len(ch_names) == n_ch:
+        # read relevant info
+        fname_lbl = _check_curry_labels_filename(fname)
+        lbl = fname_lbl.read_text().split("START_LIST")
+        ch_names_full = []
+        for i in range(1, len(lbl)):
+            if "LABELS" in lbl[i - 1].split()[-1]:
+                for ll in lbl[i].split("\n")[1:]:
+                    if "LABELS" not in ll:
+                        ch_names_full.append(ll.strip())
+                    else:
+                        break
+        hdr = fname_hdr.read_text().split("START_LIST")
+        chaninfile_full = []
+        for i in range(1, len(hdr)):
+            if "CHAN_IN_FILE" in hdr[i - 1].split()[-1]:
+                for ll in hdr[i].split("\n")[1:]:
+                    if "CHAN_IN_FILE" not in ll:
+                        chaninfile_full.append(int(ll.strip()))
+                    else:
+                        break
+        # drop channels with chan_in_file==0, account for order
+        i_drop = [i for i, ich in enumerate(chaninfile_full) if ich == 0]
+        ch_names = [
+            ch_names_full[i] for i in np.argsort(chaninfile_full) if i not in i_drop
+        ]
+        ch_types = [ch_types[i] for i in np.argsort(chaninfile_full) if i not in i_drop]
+        units = [units[i] for i in np.argsort(chaninfile_full) if i not in i_drop]
+
     assert len(ch_types) == len(units) == len(ch_names) == n_ch
 
     # finetune channel types (e.g.
stim, eog etc might be identified by name) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index b97e31b2492..2a6434577b1 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -26,6 +26,8 @@ curry7_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 7 ASCII.dat" curry8_bdf_file = curry_dir / "test_bdf_stim_channel Curry 8.cdt" curry8_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt" +Ref_chan_omitted_file = curry_dir / "Ref_channel_omitted Curry7.dat" +Ref_chan_omitted_reordered_file = curry_dir / "Ref_channel_omitted reordered Curry7.dat" @pytest.fixture(scope="session") @@ -48,7 +50,9 @@ def bdf_curry_ref(): @pytest.mark.parametrize("preload", [True, False]) def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): """Test reading CURRY files.""" - with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! + with pytest.warns( + RuntimeWarning, match="Fiducial point nasion not found" + ): # TODO change way to add montage in curry.py! raw = read_raw_curry(fname, preload=preload) assert hasattr(raw, "_data") == preload @@ -84,6 +88,7 @@ def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), ], ) +# TODO! def test_read_raw_curry_test_raw(fname): """Test read_raw_curry with _test_raw_reader.""" with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! @@ -100,7 +105,9 @@ def test_read_raw_curry_test_raw(fname): ) def test_read_raw_curry_preload_equal(fname): """Test raw identity with preload=True/False.""" - with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! + with pytest.warns( + RuntimeWarning, match="Fiducial point nasion not found" + ): # TODO change way to add montage in curry.py! raw1 = read_raw_curry(fname, preload=False) raw1.load_data() assert raw1 == read_raw_curry(fname, preload=True) @@ -118,7 +125,7 @@ def test_read_events_curry_are_same_as_bdf(fname): """Test events from curry annotations recovers the right events.""" EVENT_ID = {str(ii): ii for ii in range(5)} REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) - + # TODO! with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! raw = read_raw_curry(fname) events, _ = events_from_annotations(raw, event_id=EVENT_ID) @@ -211,6 +218,39 @@ def test_read_curry_annotations(fname): _ = read_annotations(fname, sfreq=12.0) +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname,expected_channel_list", + [ + pytest.param( + Ref_chan_omitted_file, + ["FP1", "FPZ", "FP2", "VEO", "EKG", "Trigger"], + id="Ref omitted, normal order", + ), + pytest.param( + Ref_chan_omitted_reordered_file, + ["FP2", "FPZ", "FP1", "VEO", "EKG", "Trigger"], + id="Ref omitted, reordered", + ), + ], +) +def test_read_files_missing_channel(fname, expected_channel_list): + """Test reading data files that has an omitted channel.""" + # This for Git issue #8391. In some cases, the 'labels' (.rs3 file will + # list channels that are not actually saved in the datafile (such as the + # 'Ref' channel). These channels are denoted in the 'info' (.dap) file + # in the CHAN_IN_FILE section with a '0' as their index. + # If the CHAN_IN_FILE section is present, the code also assures that the + # channels are sorted in the prescribed order. + # This test makes sure the data load correctly, and that we end up with + # the proper channel list. 
+ with pytest.warns( + RuntimeWarning, match="Fiducial point nasion not found" + ): # TODO change way to add montage in curry.py! + raw = read_raw_curry(fname, preload=True) + assert raw.ch_names == expected_channel_list + + @testing.requires_testing_data @pytest.mark.parametrize( "fname", From 3ffd6aae5389b494cb63f9b41a50816be213f199 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 2 Jul 2025 19:06:39 +0100 Subject: [PATCH 22/27] soft_import pandas --- mne/io/curry/curry.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index c19da804339..bc1a7cbcbbb 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -8,7 +8,6 @@ from pathlib import Path import numpy as np -import pandas as pd # from ..._fiff._digitization import _make_dig_points from ..._fiff.meas_info import create_info @@ -91,8 +90,10 @@ def _get_curry_recording_type(fname): def _get_curry_epoch_info(fname): _soft_import("curryreader", "read epoch info") + _soft_import("pandas", "dataframe integration") import curryreader + import pandas as pd # use curry-python-reader currydata = curryreader.read(str(fname), plotdata=0, verbosity=1) From 3cca0d3776aeff95825ef309ff1c49d7f14ba0a1 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Tue, 15 Jul 2025 18:28:30 +0100 Subject: [PATCH 23/27] set sensor locations --- mne/io/curry/__init__.py | 1 + mne/io/curry/curry.py | 222 +++++++++++++++++++++++++------ mne/io/curry/tests/test_curry.py | 93 +++++++------ 3 files changed, 228 insertions(+), 88 deletions(-) diff --git a/mne/io/curry/__init__.py b/mne/io/curry/__init__.py index 5b2e89b6798..d71f0465db4 100644 --- a/mne/io/curry/__init__.py +++ b/mne/io/curry/__init__.py @@ -6,3 +6,4 @@ from .curry import read_raw_curry from .curry import read_impedances_curry +from .curry import read_montage_curry diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index bc1a7cbcbbb..7ecda0a94f3 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -9,13 +9,21 @@ import numpy as np -# from ..._fiff._digitization import _make_dig_points +from ..._fiff._digitization import _make_dig_points +from ..._fiff.constants import FIFF from ..._fiff.meas_info import create_info +from ..._fiff.tag import _coil_trans_to_loc from ..._fiff.utils import _mult_cal_one, _read_segments_file from ...annotations import annotations_from_events from ...channels import make_dig_montage from ...epochs import Epochs -from ...utils import _soft_import, verbose, warn +from ...surface import _normal_orth +from ...transforms import Transform, apply_trans +from ...utils import ( + _soft_import, + verbose, + warn, +) from ..base import BaseRaw CURRY_SUFFIX_DATA = [".cdt", ".dat"] @@ -124,6 +132,19 @@ def _get_curry_epoch_info(fname): ) +def _get_curry_meg_normals(fname): + fname_hdr = _check_curry_header_filename(fname) + normals_str = fname_hdr.read_text().split("\n") + i_start, i_stop = [ + i + for i, ll in enumerate(normals_str) + if ("NORMALS" in ll and "START_LIST" in ll) + or ("NORMALS" in ll and "END_LIST" in ll) + ] + normals_str = [nn.split("\t") for nn in normals_str[i_start + 1 : i_stop]] + return np.array([[float(nnn.strip()) for nnn in nn] for nn in normals_str]) + + def _extract_curry_info(fname): _soft_import("curryreader", "read file header") @@ -148,6 +169,8 @@ def _extract_curry_info(fname): ch_names = currydata["labels"] ch_pos = currydata["sensorpos"] landmarks = currydata["landmarks"] + if not isinstance(landmarks, np.ndarray): + landmarks = np.array(landmarks) 
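+    # note: curryreader returns a plain (possibly empty) list when no
+    # landmarks were digitized; coercing to ndarray keeps the later
+    # scaling and indexing uniform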
landmarkslabels = currydata["landmarkslabels"] hpimatrix = currydata["hpimatrix"] @@ -158,7 +181,7 @@ def _extract_curry_info(fname): events = currydata["events"] # annotations = currydata[ # "annotations" - # ] # TODO these dont really seem to correspond to events! what is it? + # ] # TODO - these dont really seem to correspond to events! what is it? # impedance measurements # moved to standalone def; see read_impedances_curry @@ -292,7 +315,7 @@ def _read_annotations_curry(fname, sfreq="auto"): Parameters ---------- - fname : str + fname : path-like The filename. sfreq : float | 'auto' The sampling frequency in the file. If set to 'auto' then the @@ -331,13 +354,10 @@ def _read_annotations_curry(fname, sfreq="auto"): def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): # scale ch_pos to m?! ch_pos /= 1000.0 + landmarks /= 1000.0 # channel locations - # only take inner coil for MEG (ch_pos[i,:3]) - # TODO what about misc without pos? can they mess things up if unordered? + # TODO - what about misc without pos? can they mess things up if unordered? assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) - ch_pos_meg = { - ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "mag" - } ch_pos_eeg = { ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "eeg" } @@ -370,25 +390,113 @@ def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): hpi=hpi_pos, coord_frame="unknown", ) - # dig = _make_dig_points( - # nasion=landmark_dict["Nas"], - # lpa=landmark_dict["LPA"], - # rpa=landmark_dict["RPA"], - # hpi=hpi_pos, - # extra_points=hsp_pos, - # dig_ch_pos=ch_pos_eeg, - # coord_frame="unknown", - # ) else: # not recorded? pass - # collect pos for meg - if ch_pos_meg != dict(): - warn("reading MEG sensor locations not yet implemented!") - return mont +def _set_chanloc_curry(inst, ch_types, ch_pos, landmarks, landmarkslabels): + ch_names = inst.info["ch_names"] + + # scale ch_pos to m?! + ch_pos /= 1000.0 + landmarks /= 1000.0 + # channel locations + # TODO - what about misc without pos? can they mess things up if unordered? 
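+    # invariant checked below: curryreader lists sensor positions only for
+    # mag/eeg channels (in channel order), so there must be at least as many
+    # positions as mag + eeg channels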
+ assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) + ch_pos_meg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "mag" + } + ch_pos_eeg = { + ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "eeg" + } + + # landmarks and headshape + landmark_dict = dict(zip(landmarkslabels, landmarks)) + for k in ["Nas", "RPA", "LPA"]: + if k not in landmark_dict.keys(): + landmark_dict[k] = None + if len(landmarkslabels) > 0: + hpi_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("HPI[1-99]", n)], + :, + ] + else: + hpi_pos = None + if len(landmarkslabels) > 0: + hsp_pos = landmarks[ + [i for i, n in enumerate(landmarkslabels) if re.match("H[1-99]", n)], : + ] + else: + hsp_pos = None + + add_missing_fiducials = ( + True + if ( + not landmark_dict["Nas"] + and not landmark_dict["LPA"] + and not landmark_dict["LPA"] + ) + else False # raises otherwise + ) + dig = _make_dig_points( + nasion=landmark_dict["Nas"], + lpa=landmark_dict["LPA"], + rpa=landmark_dict["RPA"], + hpi=hpi_pos, + extra_points=hsp_pos, + dig_ch_pos=ch_pos_eeg, + coord_frame="head", + add_missing_fiducials=add_missing_fiducials, + ) + with inst.info._unlock(): + inst.info["dig"] = dig + + # loc transformation for meg sensors (taken from previous version) + if len(ch_pos_meg) > 0: + R = np.eye(4) + R[[0, 1], [0, 1]] = -1 # rotate 180 deg + # shift down and back + # (chosen by eyeballing to make the CTF helmet look roughly correct) + R[:3, 3] = [0.0, -0.015, -0.12] + curry_dev_dev_t = Transform("ctf_meg", "meg", R) + + ch_normals_meg = _get_curry_meg_normals(inst.filenames[0]) + assert len(ch_normals_meg) == len(ch_pos_meg) + else: + curry_dev_dev_t, ch_normals_meg = None, None + # fill up chanlocs + assert len(ch_names) == len(ch_types) >= len(ch_pos) + for i, (ch_name, ch_type, ch_loc) in enumerate(zip(ch_names, ch_types, ch_pos)): + assert inst.info["ch_names"][i] == ch_name + ch = inst.info["chs"][i] + if ch_type == "eeg": + with inst.info._unlock(): + ch["loc"][:3] = ch_loc[:3] + ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD + elif ch_type == "mag": + # transform mode + pos = ch_loc[:3] # just the inner coil for MEG + pos = apply_trans(curry_dev_dev_t, pos) + nn = ch_normals_meg[i] + assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4) + nn /= np.linalg.norm(nn) + nn = apply_trans(curry_dev_dev_t, nn, move=False) + trans = np.eye(4) + trans[:3, 3] = pos + trans[:3, :3] = _normal_orth(nn).T + with inst.info._unlock(): + ch["loc"] = _coil_trans_to_loc(trans) + ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE + elif ch_type == "misc": + pass + else: + raise NotImplementedError + + # _make_trans_dig(curry_paths, inst.info, curry_dev_dev_t) # TODO?! + + @verbose def read_raw_curry( fname, import_epochs_as_events=False, preload=False, verbose=None @@ -432,7 +540,7 @@ def read_raw_curry( else: inst = Epochs( inst, **curry_epoch_info - ) # TODO seems to rejects flat channel + ) # TODO - seems to reject flat channel if rectype == "evoked": raise NotImplementedError return inst @@ -511,11 +619,16 @@ def __init__(self, fname, preload=False, verbose=None): annot = annotations_from_events(events, sfreq) self.set_annotations(annot) - # make montage - self._set_curry_montage(ch_types, ch_pos, landmarks, landmarkslabels) - - # with self.info._unlock(): - # self.info['dig'] = mont.dig + # add sensor locations + # TODO - is this working correctly? 
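+        # _set_chanloc_curry fills info['dig'] and the per-channel
+        # loc/coord_frame entries in place on this instance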
+ assert len(self.info["ch_names"]) == len(ch_types) >= len(ch_pos) + _set_chanloc_curry( + inst=self, + ch_types=ch_types, + ch_pos=ch_pos, + landmarks=landmarks, + landmarkslabels=landmarkslabels, + ) # add HPI data (if present) # from curryreader docstring: @@ -524,7 +637,7 @@ def __init__(self, fname, preload=False, verbose=None): # that's incorrect, though. it seems to be: # [sample, dipole_1, x_1,y_1, z_1, dev_1, ..., dipole_n, x_n, ...] # for all n coils. - # TODO + # TODO - do they actually store cHPI? if not isinstance(hpimatrix, list): warn("cHPI data found, but reader not implemented.") hpisamples = hpimatrix[:, 0] @@ -532,20 +645,6 @@ def __init__(self, fname, preload=False, verbose=None): hpimatrix = hpimatrix[:, 1:].reshape(hpimatrix.shape[0], n_coil, 5) print(f"found {len(hpisamples)} cHPI samples for {n_coil} coils") - def _set_curry_montage(self, ch_types, ch_pos, landmarks, landmarkslabels): - assert len(self.info["ch_names"]) == len(ch_types) >= len(ch_pos) - - mont = _make_curry_montage( - self.info["ch_names"], ch_types, ch_pos, landmarks, landmarkslabels - ) - - # hack the montage in (for MEG chans) - # TODO change this! - ch_types_tmp = [ct if ct != "mag" else "eeg" for ct in ch_types] - self.set_channel_types(dict(zip(self.info["ch_names"], ch_types_tmp))) - self.set_montage(mont, on_missing="ignore") - self.set_channel_types(dict(zip(self.info["ch_names"], ch_types))) - def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" if self._raw_extras[fi]["is_ascii"]: @@ -593,7 +692,7 @@ def read_impedances_curry(fname, verbose=None): ch_names = currydata["labels"] # try get measurement times - # TODO possible? + # TODO - is this even possible? annotations = currydata[ "annotations" ] # dont really seem to correspond to events!?! @@ -608,3 +707,38 @@ def read_impedances_curry(fname, verbose=None): print({ch: float(imp) for ch, imp in zip(ch_names, impedances[iimp])}) return ch_names, impedances + + +@verbose +def read_montage_curry(fname, verbose=None): + """Read eeg montage from Curry files. + + Parameters + ---------- + fname : path-like + The filename. + %(verbose)s + + Returns + ------- + montage : instance of DigMontage | None + The montage. 
+ """ + fname = _check_curry_filename(fname) + ( + _, + _, + ch_names, + ch_types, + ch_pos, + landmarks, + landmarkslabels, + _, + _, + _, + _, + _, + _, + _, + ) = _extract_curry_info(fname) + return _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 2a6434577b1..3b09aa50f2f 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -10,12 +10,14 @@ from numpy.testing import assert_allclose, assert_array_equal from mne.annotations import events_from_annotations, read_annotations +from mne.channels import DigMontage from mne.datasets import testing from mne.event import find_events -from mne.io.curry import read_impedances_curry, read_raw_curry +from mne.io.curry import read_impedances_curry, read_montage_curry, read_raw_curry from mne.io.curry.curry import _check_curry_filename, _check_curry_header_filename from mne.io.edf import read_raw_bdf from mne.io.tests.test_raw import _test_raw_reader +from mne.transforms import Transform pytest.importorskip("curryreader") @@ -50,32 +52,27 @@ def bdf_curry_ref(): @pytest.mark.parametrize("preload", [True, False]) def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): """Test reading CURRY files.""" - with pytest.warns( - RuntimeWarning, match="Fiducial point nasion not found" - ): # TODO change way to add montage in curry.py! - raw = read_raw_curry(fname, preload=preload) - - assert hasattr(raw, "_data") == preload - assert raw.n_times == bdf_curry_ref.n_times - assert raw.info["sfreq"] == bdf_curry_ref.info["sfreq"] - - for field in ["kind", "ch_name"]: - assert_array_equal( - [ch[field] for ch in raw.info["chs"]], - [ch[field] for ch in bdf_curry_ref.info["chs"]], - ) - - assert_allclose( - raw.get_data(verbose="error"), bdf_curry_ref.get_data(), atol=tol - ) + raw = read_raw_curry(fname, preload=preload) + + assert hasattr(raw, "_data") == preload + assert raw.n_times == bdf_curry_ref.n_times + assert raw.info["sfreq"] == bdf_curry_ref.info["sfreq"] - picks, start, stop = ["C3", "C4"], 200, 800 - assert_allclose( - raw.get_data(picks=picks, start=start, stop=stop, verbose="error"), - bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), - rtol=tol, + for field in ["kind", "ch_name"]: + assert_array_equal( + [ch[field] for ch in raw.info["chs"]], + [ch[field] for ch in bdf_curry_ref.info["chs"]], ) - # assert raw.info["dev_head_t"] is None # TODO do we need this value? + + assert_allclose(raw.get_data(verbose="error"), bdf_curry_ref.get_data(), atol=tol) + + picks, start, stop = ["C3", "C4"], 200, 800 + assert_allclose( + raw.get_data(picks=picks, start=start, stop=stop, verbose="error"), + bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), + rtol=tol, + ) + # assert raw.info["dev_head_t"] is None # TODO do we need this value? @testing.requires_testing_data @@ -88,11 +85,9 @@ def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), ], ) -# TODO! def test_read_raw_curry_test_raw(fname): """Test read_raw_curry with _test_raw_reader.""" - with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! 
- _test_raw_reader(read_raw_curry, fname=fname) + _test_raw_reader(read_raw_curry, fname=fname) @testing.requires_testing_data @@ -105,12 +100,9 @@ def test_read_raw_curry_test_raw(fname): ) def test_read_raw_curry_preload_equal(fname): """Test raw identity with preload=True/False.""" - with pytest.warns( - RuntimeWarning, match="Fiducial point nasion not found" - ): # TODO change way to add montage in curry.py! - raw1 = read_raw_curry(fname, preload=False) - raw1.load_data() - assert raw1 == read_raw_curry(fname, preload=True) + raw1 = read_raw_curry(fname, preload=False) + raw1.load_data() + assert raw1 == read_raw_curry(fname, preload=True) @testing.requires_testing_data @@ -126,11 +118,10 @@ def test_read_events_curry_are_same_as_bdf(fname): EVENT_ID = {str(ii): ii for ii in range(5)} REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) # TODO! - with pytest.raises(RuntimeWarning): # TODO change way to add montage in curry.py! - raw = read_raw_curry(fname) - events, _ = events_from_annotations(raw, event_id=EVENT_ID) - assert_allclose(events, REF_EVENTS) - assert raw.info["dev_head_t"] is None + raw = read_raw_curry(fname) + events, _ = events_from_annotations(raw, event_id=EVENT_ID) + assert_allclose(events, REF_EVENTS) + assert raw.info["dev_head_t"] == Transform("meg", "head") @testing.requires_testing_data @@ -244,11 +235,8 @@ def test_read_files_missing_channel(fname, expected_channel_list): # channels are sorted in the prescribed order. # This test makes sure the data load correctly, and that we end up with # the proper channel list. - with pytest.warns( - RuntimeWarning, match="Fiducial point nasion not found" - ): # TODO change way to add montage in curry.py! - raw = read_raw_curry(fname, preload=True) - assert raw.ch_names == expected_channel_list + raw = read_raw_curry(fname, preload=True) + assert raw.ch_names == expected_channel_list @testing.requires_testing_data @@ -269,3 +257,20 @@ def test_read_impedances_curry(fname): imp, actual_imp, ) + + +@testing.requires_testing_data +@pytest.mark.parametrize( + "fname", + [ + pytest.param(curry7_bdf_file, id="curry 7"), + pytest.param(curry8_bdf_file, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), + ], +) +def test_read_montage_curry(fname): + """Test reading montage from CURRY files.""" + mont = read_montage_curry(fname) + assert isinstance(mont, DigMontage) + # TODO - not very specific, yet From 2495cd952dafd9baa3157b8d97a2b1f10334ec94 Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 16 Jul 2025 18:13:43 +0100 Subject: [PATCH 24/27] extract device_info + refactoring --- mne/io/curry/curry.py | 134 ++++++++++++++++--------------- mne/io/curry/tests/test_curry.py | 9 +++ 2 files changed, 77 insertions(+), 66 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 7ecda0a94f3..d764d3cf38c 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -80,6 +80,62 @@ def _check_curry_labels_filename(fname): return fname_labels +def _get_curry_meas_info(fname): + # get other essential info not provided by curryreader + fname_hdr = _check_curry_header_filename(fname) + content_hdr = fname_hdr.read_text() + + # read meas_date + meas_date = [ + int(re.compile(rf"{v}\s*=\s*-?\d+").search(content_hdr).group(0).split()[-1]) + for v in [ + "StartYear", + "StartMonth", + "StartDay", + "StartHour", + "StartMin", + "StartSec", + "StartMillisec", + ] + ] + try: + meas_date = datetime( + *meas_date[:-1], + meas_date[-1] 
* 1000, # -> microseconds + timezone.utc, + ) + except Exception: + meas_date = None + + # read datatype + byteorder = ( + re.compile(r"DataByteOrder\s*=\s*[A-Z]+") + .search(content_hdr) + .group() + .split()[-1] + ) + is_ascii = byteorder == "ASCII" + + # amp info + # TODO - seems like there can be identifiable information (serial numbers, dates). + # MNE anonymization functions only overwrite "serial" and "site", though + # TODO - there can be filter details, too + amp_info = ( + re.compile(r"AmplifierInfo\s*=.*\n") + .search(content_hdr) + .group() + .strip("\n") + .split("= ")[-1] + .strip() + ) + + device_info = ( + dict(type=amp_info) if amp_info != "" else None # model="", serial="", site="" + ) + + return meas_date, is_ascii, device_info + + def _get_curry_recording_type(fname): _soft_import("curryreader", "read recording modality") @@ -175,7 +231,9 @@ def _extract_curry_info(fname): hpimatrix = currydata["hpimatrix"] # data - orig_format = "single" # curryreader.py always reads float32. is this correct? + orig_format = "int" + # curryreader.py always reads float32, but this is probably just numpy. + # legacy MNE code states int. # events events = currydata["events"] @@ -189,46 +247,6 @@ def _extract_curry_info(fname): # get other essential info not provided by curryreader fname_hdr = _check_curry_header_filename(fname) - content_hdr = fname_hdr.read_text() - - # read meas_date - meas_date = [ - int(re.compile(rf"{v}\s*=\s*-?\d+").search(content_hdr).group(0).split()[-1]) - for v in [ - "StartYear", - "StartMonth", - "StartDay", - "StartHour", - "StartMin", - "StartSec", - "StartMillisec", - ] - ] - try: - meas_date = datetime( - *meas_date[:-1], - meas_date[-1] * 1000, # -> microseconds - timezone.utc, - ) - except Exception: - meas_date = None - - print(f"meas_date: {meas_date}") - - # read datatype - byteorder = ( - re.compile(r"DataByteOrder\s*=\s*[A-Z]+") - .search(content_hdr) - .group() - .split()[-1] - ) - is_ascii = byteorder == "ASCII" - - # amp info - # TODO - # amp_info = ( - # re.compile(r"AmplifierInfo\s*=.*\n").search(content_hdr).group().split("= ") - # ) # channel types and units ch_types, units = [], [] @@ -304,9 +322,7 @@ def _extract_curry_info(fname): events, orig_format, orig_units, - is_ascii, cals, - meas_date, ) @@ -328,9 +344,7 @@ def _read_annotations_curry(fname, sfreq="auto"): """ fname = _check_curry_filename(fname) - (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _, _, _) = _extract_curry_info( - fname - ) + (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _) = _extract_curry_info(fname) if sfreq == "auto": sfreq = sfreq_fromfile elif np.isreal(sfreq): @@ -494,7 +508,7 @@ def _set_chanloc_curry(inst, ch_types, ch_pos, landmarks, landmarkslabels): else: raise NotImplementedError - # _make_trans_dig(curry_paths, inst.info, curry_dev_dev_t) # TODO?! + # _make_trans_dig(curry_paths, inst.info, curry_dev_dev_t) # TODO - necessary?! @verbose @@ -579,13 +593,14 @@ def __init__(self, fname, preload=False, verbose=None): events, orig_format, orig_units, - is_ascii, cals, - meas_date, ) = _extract_curry_info(fname) + meas_date, is_ascii, device_info = _get_curry_meas_info(fname) + # construct info info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) + info["device_info"] = device_info # create raw object last_samps = [n_samples - 1] @@ -620,7 +635,7 @@ def __init__(self, fname, preload=False, verbose=None): self.set_annotations(annot) # add sensor locations - # TODO - is this working correctly? + # TODO - review wanted! 
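+        # (review focus: the CTF device transform in _set_chanloc_curry is
+        # eyeballed, and EEG positions are assumed to already be in head frame)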
assert len(self.info["ch_names"]) == len(ch_types) >= len(ch_pos) _set_chanloc_curry( inst=self, @@ -725,20 +740,7 @@ def read_montage_curry(fname, verbose=None): The montage. """ fname = _check_curry_filename(fname) - ( - _, - _, - ch_names, - ch_types, - ch_pos, - landmarks, - landmarkslabels, - _, - _, - _, - _, - _, - _, - _, - ) = _extract_curry_info(fname) + (_, _, ch_names, ch_types, ch_pos, landmarks, landmarkslabels, _, _, _, _, _) = ( + _extract_curry_info(fname) + ) return _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 3b09aa50f2f..372a2bf81e6 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -239,6 +239,15 @@ def test_read_files_missing_channel(fname, expected_channel_list): assert raw.ch_names == expected_channel_list +@testing.requires_testing_data +def test_read_device_info(): + """Test extraction of device_info.""" + raw = read_raw_curry(curry7_bdf_file) + assert not raw.info["device_info"] + raw2 = read_raw_curry(Ref_chan_omitted_file) + assert isinstance(raw2.info["device_info"], dict) + + @testing.requires_testing_data @pytest.mark.parametrize( "fname", From feb7def0a4aae43a3ba8124d0dc40f4242d5470f Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 16 Jul 2025 20:00:44 +0100 Subject: [PATCH 25/27] test for epoched recording --- mne/io/curry/curry.py | 42 +++++++++++++++++++++++++------- mne/io/curry/tests/test_curry.py | 24 +++++++++++++++--- 2 files changed, 53 insertions(+), 13 deletions(-) diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index d764d3cf38c..18344420378 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -191,13 +191,27 @@ def _get_curry_epoch_info(fname): def _get_curry_meg_normals(fname): fname_hdr = _check_curry_header_filename(fname) normals_str = fname_hdr.read_text().split("\n") - i_start, i_stop = [ + # i_start, i_stop = [ + # i + # for i, ll in enumerate(normals_str) + # if ("NORMALS" in ll and "START_LIST" in ll) + # or ("NORMALS" in ll and "END_LIST" in ll) + # ] + # normals_str = [nn.split("\t") for nn in normals_str[i_start + 1 : i_stop]] + i_list = [ i for i, ll in enumerate(normals_str) if ("NORMALS" in ll and "START_LIST" in ll) or ("NORMALS" in ll and "END_LIST" in ll) ] - normals_str = [nn.split("\t") for nn in normals_str[i_start + 1 : i_stop]] + assert len(i_list) % 2 == 0 + i_start_list = i_list[::2] + i_stop_list = i_list[1::2] + normals_str = [ + nn.split("\t") + for i_start, i_stop in zip(i_start_list, i_stop_list) + for nn in normals_str[i_start + 1 : i_stop] + ] return np.array([[float(nnn.strip()) for nnn in nn] for nn in normals_str]) @@ -296,10 +310,18 @@ def _extract_curry_info(fname): ch_names = [ ch_names_full[i] for i in np.argsort(chaninfile_full) if i not in i_drop ] + ch_pos = np.array( + [ + ch_pos[i] + for i in np.argsort(chaninfile_full) + if (i not in i_drop) and (i < len(ch_pos)) + ] + ) ch_types = [ch_types[i] for i in np.argsort(chaninfile_full) if i not in i_drop] units = [units[i] for i in np.argsort(chaninfile_full) if i not in i_drop] assert len(ch_types) == len(units) == len(ch_names) == n_ch + assert len(ch_pos) == ch_types.count("eeg") + ch_types.count("mag") # finetune channel types (e.g. stim, eog etc might be identified by name) # TODO? 
@@ -370,8 +392,9 @@ def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): ch_pos /= 1000.0 landmarks /= 1000.0 # channel locations - # TODO - what about misc without pos? can they mess things up if unordered? + # what about misc without pos? can they mess things up if unordered? assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) + assert len(ch_pos) == (ch_types.count("mag") + ch_types.count("eeg")) ch_pos_eeg = { ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "eeg" } @@ -417,8 +440,9 @@ def _set_chanloc_curry(inst, ch_types, ch_pos, landmarks, landmarkslabels): ch_pos /= 1000.0 landmarks /= 1000.0 # channel locations - # TODO - what about misc without pos? can they mess things up if unordered? + # what about misc without pos? can they mess things up if unordered? assert len(ch_pos) >= (ch_types.count("mag") + ch_types.count("eeg")) + assert len(ch_pos) == (ch_types.count("mag") + ch_types.count("eeg")) ch_pos_meg = { ch_names[i]: ch_pos[i, :3] for i, t in enumerate(ch_types) if t == "mag" } @@ -433,14 +457,14 @@ def _set_chanloc_curry(inst, ch_types, ch_pos, landmarks, landmarkslabels): landmark_dict[k] = None if len(landmarkslabels) > 0: hpi_pos = landmarks[ - [i for i, n in enumerate(landmarkslabels) if re.match("HPI[1-99]", n)], + [i for i, n in enumerate(landmarkslabels) if re.match("HPI.?[1-99]", n)], :, ] else: hpi_pos = None if len(landmarkslabels) > 0: hsp_pos = landmarks[ - [i for i, n in enumerate(landmarkslabels) if re.match("H[1-99]", n)], : + [i for i, n in enumerate(landmarkslabels) if re.match("H.?[1-99]", n)], : ] else: hsp_pos = None @@ -448,9 +472,9 @@ def _set_chanloc_curry(inst, ch_types, ch_pos, landmarks, landmarkslabels): add_missing_fiducials = ( True if ( - not landmark_dict["Nas"] - and not landmark_dict["LPA"] - and not landmark_dict["LPA"] + isinstance(landmark_dict["Nas"], type(None)) + and isinstance(landmark_dict["LPA"], type(None)) + and isinstance(landmark_dict["RPA"], type(None)) ) else False # raises otherwise ) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index 372a2bf81e6..ef85b9cd886 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -3,7 +3,6 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. 
- import numpy as np import pytest from numpy import empty @@ -12,9 +11,14 @@ from mne.annotations import events_from_annotations, read_annotations from mne.channels import DigMontage from mne.datasets import testing +from mne.epochs import Epochs from mne.event import find_events from mne.io.curry import read_impedances_curry, read_montage_curry, read_raw_curry -from mne.io.curry.curry import _check_curry_filename, _check_curry_header_filename +from mne.io.curry.curry import ( + RawCurry, + _check_curry_filename, + _check_curry_header_filename, +) from mne.io.edf import read_raw_bdf from mne.io.tests.test_raw import _test_raw_reader from mne.transforms import Transform @@ -30,6 +34,7 @@ curry8_bdf_ascii_file = curry_dir / "test_bdf_stim_channel Curry 8 ASCII.cdt" Ref_chan_omitted_file = curry_dir / "Ref_channel_omitted Curry7.dat" Ref_chan_omitted_reordered_file = curry_dir / "Ref_channel_omitted reordered Curry7.dat" +epoched_file = curry_dir / "Epoched.cdt" @pytest.fixture(scope="session") @@ -72,7 +77,19 @@ def test_read_raw_curry(fname, tol, preload, bdf_curry_ref): bdf_curry_ref.get_data(picks=picks, start=start, stop=stop), rtol=tol, ) - # assert raw.info["dev_head_t"] is None # TODO do we need this value? + assert raw.info["dev_head_t"] == Transform("meg", "head") + + +@testing.requires_testing_data +def test_read_raw_curry_epoched(): + """Test reading epoched file.""" + ep = read_raw_curry(epoched_file) + assert isinstance(ep, Epochs) + assert len(ep.events) == 26 + assert len(ep.annotations) == 0 + raw = read_raw_curry(epoched_file, import_epochs_as_events=True) + assert isinstance(raw, RawCurry) + assert len(raw.annotations) == 26 @testing.requires_testing_data @@ -117,7 +134,6 @@ def test_read_events_curry_are_same_as_bdf(fname): """Test events from curry annotations recovers the right events.""" EVENT_ID = {str(ii): ii for ii in range(5)} REF_EVENTS = find_events(read_raw_bdf(bdf_file, preload=True)) - # TODO! raw = read_raw_curry(fname) events, _ = events_from_annotations(raw, event_id=EVENT_ID) assert_allclose(events, REF_EVENTS) From e85020150b955059bc626000544bea701cbf4f9e Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Wed, 16 Jul 2025 20:09:58 +0100 Subject: [PATCH 26/27] add changelog entry --- doc/changes/devel/13176.dependency.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 doc/changes/devel/13176.dependency.rst diff --git a/doc/changes/devel/13176.dependency.rst b/doc/changes/devel/13176.dependency.rst new file mode 100644 index 00000000000..713ce3ba502 --- /dev/null +++ b/doc/changes/devel/13176.dependency.rst @@ -0,0 +1 @@ +New reader for Neuroscan Curry files, using the curry-python-reader module, by `Dominik Welke`_. 
\ No newline at end of file From 726067534fb3f486d45cfa1fb9ff31325a1a90fc Mon Sep 17 00:00:00 2001 From: dominikwelke Date: Fri, 18 Jul 2025 20:50:20 +0100 Subject: [PATCH 27/27] fixes, refactoring, documentation, clean up --- mne/io/curry/__init__.py | 2 +- mne/io/curry/curry.py | 111 ++++++++++++++++++------------- mne/io/curry/tests/test_curry.py | 32 +++++---- 3 files changed, 87 insertions(+), 58 deletions(-) diff --git a/mne/io/curry/__init__.py b/mne/io/curry/__init__.py index d71f0465db4..9c665ac09fb 100644 --- a/mne/io/curry/__init__.py +++ b/mne/io/curry/__init__.py @@ -6,4 +6,4 @@ from .curry import read_raw_curry from .curry import read_impedances_curry -from .curry import read_montage_curry +from .curry import read_dig_curry diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index 18344420378..f150f609d2d 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -37,6 +37,11 @@ def _check_curry_filename(fname): # try suffixes if fname_in.suffix in CURRY_SUFFIX_DATA: fname_out = fname_in + elif ( + fname_in.with_suffix("").exists() + and fname_in.with_suffix("").suffix in CURRY_SUFFIX_DATA + ): + fname_out = fname_in.with_suffix("") else: for data_suff in CURRY_SUFFIX_DATA: if fname_in.with_suffix(data_suff).exists(): @@ -116,10 +121,12 @@ def _get_curry_meas_info(fname): ) is_ascii = byteorder == "ASCII" - # amp info - # TODO - seems like there can be identifiable information (serial numbers, dates). + # amplifier info + # TODO - PRIVACY + # seems like there can be identifiable information (serial numbers, dates). # MNE anonymization functions only overwrite "serial" and "site", though - # TODO - there can be filter details, too + # TODO - FUTURE ENHANCEMENT + # # there can be filter details in AmplifierInfo, too amp_info = ( re.compile(r"AmplifierInfo\s*=.*\n") .search(content_hdr) @@ -182,9 +189,12 @@ def _get_curry_epoch_info(fname): event_id=event_id, tmin=0.0, tmax=(n_samples - 1) / sfreq, - baseline=(0, 0), + baseline=None, + detrend=None, + verbose=False, metadata=epochmetainfo, reject_by_annotation=False, + reject=None, ) @@ -251,9 +261,18 @@ def _extract_curry_info(fname): # events events = currydata["events"] - # annotations = currydata[ - # "annotations" - # ] # TODO - these dont really seem to correspond to events! what is it? + # BUG in curryreader (v.0.1.1)! annotations read incorrectly (shifted by 1 line) + annotations = currydata["annotations"] + assert len(annotations) == len(events) + if len(events) > 0: + # quick fix: shift annotation down, last one is missing + annotations = annotations[1:] + [""] + event_desc = dict() + for k, v in zip(events[:, 1], annotations): + if int(k) not in event_desc.keys(): + event_desc[int(k)] = v.strip() if (v.strip() != "") else str(int(k)) + else: + event_desc = None # impedance measurements # moved to standalone def; see read_impedances_curry @@ -324,8 +343,7 @@ def _extract_curry_info(fname): assert len(ch_pos) == ch_types.count("eeg") + ch_types.count("mag") # finetune channel types (e.g. stim, eog etc might be identified by name) - # TODO? 
- + # TODO - FUTURE ENHANCEMENT # scale data to SI units orig_units = dict(zip(ch_names, units)) cals = [ @@ -342,6 +360,7 @@ def _extract_curry_info(fname): landmarkslabels, hpimatrix, events, + event_desc, orig_format, orig_units, cals, @@ -366,7 +385,9 @@ def _read_annotations_curry(fname, sfreq="auto"): """ fname = _check_curry_filename(fname) - (sfreq_fromfile, _, _, _, _, _, _, _, events, _, _, _) = _extract_curry_info(fname) + (sfreq_fromfile, _, _, _, _, _, _, _, events, event_desc, _, _, _) = ( + _extract_curry_info(fname) + ) if sfreq == "auto": sfreq = sfreq_fromfile elif np.isreal(sfreq): @@ -381,7 +402,7 @@ def _read_annotations_curry(fname, sfreq="auto"): if isinstance(events, np.ndarray): # if there are events events = events.astype("int") events = np.insert(events, 1, np.diff(events[:, 2:]).flatten(), axis=1)[:, :3] - return annotations_from_events(events, sfreq) + return annotations_from_events(events, sfreq, event_desc=event_desc) else: warn("no event annotations found") return None @@ -417,7 +438,7 @@ def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): hsp_pos = None # make dig montage for eeg mont = None - if ch_pos.shape[1] in [3, 6]: # eeg xyz space + if len(ch_pos_eeg) > 0: mont = make_dig_montage( ch_pos=ch_pos_eeg, nasion=landmark_dict["Nas"], @@ -425,10 +446,10 @@ def _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels): rpa=landmark_dict["RPA"], hsp=hsp_pos, hpi=hpi_pos, - coord_frame="unknown", + coord_frame="head", ) else: # not recorded? - pass + warn("No eeg sensor locations found in file.") return mont @@ -532,21 +553,23 @@ def _set_chanloc_curry(inst, ch_types, ch_pos, landmarks, landmarkslabels): else: raise NotImplementedError - # _make_trans_dig(curry_paths, inst.info, curry_dev_dev_t) # TODO - necessary?! + # TODO - REVIEW NEEDED + # do we need further transpositions for MEG channel positions? + # the testfiles i got look good to me.. + # _make_trans_dig(curry_paths, inst.info, curry_dev_dev_t) @verbose def read_raw_curry( - fname, import_epochs_as_events=False, preload=False, verbose=None + fname, import_epochs_as_annotations=False, preload=False, verbose=None ) -> "RawCurry": """Read raw data from Curry files. Parameters ---------- fname : path-like - Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``, - ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``. - import_epochs_as_events : bool + Path to a valid curry file. + import_epochs_as_annotations : bool Set to ``True`` if you want to import epoched recordings as continuous ``raw`` object with event annotations. Only do this if you know your data allows it. %(preload)s @@ -568,7 +591,9 @@ def read_raw_curry( inst = RawCurry(fname, preload, verbose) if rectype in ["epochs", "evoked"]: curry_epoch_info = _get_curry_epoch_info(fname) - if import_epochs_as_events: + if import_epochs_as_annotations: + # TODO - REVIEW NEEDED + # give those annotations a specific name/type? 
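+            # the dict comprehension below inverts event_id (description ->
+            # code) into the code -> description mapping expected by
+            # annotations_from_events, so the annotations get readable labels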
+            epoch_annotations = annotations_from_events(
                 events=curry_epoch_info["events"],
                 event_desc={v: k for k, v in curry_epoch_info["event_id"].items()},
                 sfreq=inst.info["sfreq"],
             )
             inst.set_annotations(inst.annotations + epoch_annotations)
         else:
-            inst = Epochs(
-                inst, **curry_epoch_info
-            )  # TODO - seems to reject flat channel
+            inst = Epochs(inst, **curry_epoch_info)
     if rectype == "evoked":
-        raise NotImplementedError
+        raise NotImplementedError  # not sure this format is even supported
     return inst
 
 
@@ -590,8 +613,7 @@ class RawCurry(BaseRaw):
     Parameters
     ----------
     fname : path-like
-        Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``,
-        ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``.
+        Path to a valid curry file.
     %(preload)s
     %(verbose)s
 
@@ -615,6 +637,7 @@ def __init__(self, fname, preload=False, verbose=None):
             landmarkslabels,
             hpimatrix,
             events,
+            event_desc,
             orig_format,
             orig_units,
             cals,
@@ -655,11 +678,11 @@ def __init__(self, fname, preload=False, verbose=None):
             events = np.insert(events, 1, np.diff(events[:, 2:]).flatten(), axis=1)[
                 :, :3
             ]
-            annot = annotations_from_events(events, sfreq)
+            annot = annotations_from_events(events, sfreq, event_desc=event_desc)
             self.set_annotations(annot)
 
         # add sensor locations
-        # TODO - review wanted!
+        # TODO - REVIEW NEEDED
         assert len(self.info["ch_names"]) == len(ch_types) >= len(ch_pos)
         _set_chanloc_curry(
             inst=self,
@@ -670,13 +693,15 @@ def __init__(self, fname, preload=False, verbose=None):
         )
 
         # add HPI data (if present)
+        # TODO - FUTURE ENHANCEMENT
         # from curryreader docstring:
         # "HPI-coil measurements matrix (Orion-MEG only) where every row is:
         # [measurementsample, dipolefitflag, x, y, z, deviation]"
-        # that's incorrect, though. it seems to be:
+        #
+        # that's incorrect, though. it rather seems to be:
         # [sample, dipole_1, x_1,y_1, z_1, dev_1, ..., dipole_n, x_n, ...]
         # for all n coils.
-        # TODO - do they actually store cHPI?
+        # We are missing good example data or format specs! Do not implement for now.
         if not isinstance(hpimatrix, list):
             warn("cHPI data found, but reader not implemented.")
             hpisamples = hpimatrix[:, 0]
@@ -707,8 +732,7 @@ def read_impedances_curry(fname, verbose=None):
     Parameters
     ----------
     fname : path-like
-        Path to a curry file with extensions ``.dat``, ``.dap``, ``.rs3``,
-        ``.cdt``, ``.cdt.dpa``, ``.cdt.cef`` or ``.cef``.
+        Path to a valid curry file.
     %(verbose)s
 
     Returns
@@ -730,15 +754,10 @@ def read_impedances_curry(fname, verbose=None):
     impedances = currydata["impedances"]
     ch_names = currydata["labels"]
 
-    # try get measurement times
-    # TODO - is this even possible?
-    annotations = currydata[
-        "annotations"
-    ]  # dont really seem to correspond to events!?!
-    for anno in set(annotations):
-        if "impedance" in anno.lower():
-            print("FOUND IMPEDANCE ANNOTATION!")
-            print(f"'{anno}' - N={len([a for a in annotations if a == anno])}")
+    # get impedance measurement times
+    # TODO - FUTURE ENHANCEMENT
+    # the info can be in the files (as events or IMPEDANCE_TIMES), but
+    # inconsistently so, and it is low priority
 
     # print impedances
     print("impedance measurements:")
@@ -749,13 +768,13 @@
 
 
 @verbose
-def read_montage_curry(fname, verbose=None):
-    """Read eeg montage from Curry files.
+def read_dig_curry(fname, verbose=None):
+    """Read electrode locations from Curry files.
 
     Parameters
     ----------
     fname : path-like
-        The filename.
+        A valid Curry file.
%(verbose)s Returns @@ -763,8 +782,10 @@ def read_montage_curry(fname, verbose=None): montage : instance of DigMontage | None The montage. """ + # TODO - REVIEW NEEDED + # API? do i need to add this in the docs somewhere? fname = _check_curry_filename(fname) - (_, _, ch_names, ch_types, ch_pos, landmarks, landmarkslabels, _, _, _, _, _) = ( + (_, _, ch_names, ch_types, ch_pos, landmarks, landmarkslabels, _, _, _, _, _, _) = ( _extract_curry_info(fname) ) return _make_curry_montage(ch_names, ch_types, ch_pos, landmarks, landmarkslabels) diff --git a/mne/io/curry/tests/test_curry.py b/mne/io/curry/tests/test_curry.py index ef85b9cd886..f1ec6e3a06a 100644 --- a/mne/io/curry/tests/test_curry.py +++ b/mne/io/curry/tests/test_curry.py @@ -13,7 +13,7 @@ from mne.datasets import testing from mne.epochs import Epochs from mne.event import find_events -from mne.io.curry import read_impedances_curry, read_montage_curry, read_raw_curry +from mne.io.curry import read_dig_curry, read_impedances_curry, read_raw_curry from mne.io.curry.curry import ( RawCurry, _check_curry_filename, @@ -87,10 +87,15 @@ def test_read_raw_curry_epoched(): assert isinstance(ep, Epochs) assert len(ep.events) == 26 assert len(ep.annotations) == 0 - raw = read_raw_curry(epoched_file, import_epochs_as_events=True) + raw = read_raw_curry(epoched_file, import_epochs_as_annotations=True) assert isinstance(raw, RawCurry) assert len(raw.annotations) == 26 + # check signal identity + aa = raw.get_data() + bb = ep.get_data() + assert np.equal(aa, bb.transpose(1, 0, 2).reshape(aa.shape)).all() + @testing.requires_testing_data @pytest.mark.parametrize( @@ -277,7 +282,7 @@ def test_read_device_info(): def test_read_impedances_curry(fname): """Test reading impedances from CURRY files.""" _, imp = read_impedances_curry(fname) - actual_imp = empty(shape=(0, 3)) + actual_imp = empty(shape=(0, 3)) # TODO - need better testing data assert_allclose( imp, actual_imp, @@ -286,16 +291,19 @@ def test_read_impedances_curry(fname): @testing.requires_testing_data @pytest.mark.parametrize( - "fname", + "fname,mont_present", [ - pytest.param(curry7_bdf_file, id="curry 7"), - pytest.param(curry8_bdf_file, id="curry 8"), - pytest.param(curry7_bdf_ascii_file, id="curry 7 ascii"), - pytest.param(curry8_bdf_ascii_file, id="curry 8 ascii"), + pytest.param(curry7_bdf_file, True, id="curry 7"), + pytest.param(curry8_bdf_file, True, id="curry 8"), + pytest.param(curry7_bdf_ascii_file, True, id="curry 7 ascii"), + pytest.param(curry8_bdf_ascii_file, True, id="curry 8 ascii"), ], ) -def test_read_montage_curry(fname): +def test_read_montage_curry(fname, mont_present): """Test reading montage from CURRY files.""" - mont = read_montage_curry(fname) - assert isinstance(mont, DigMontage) - # TODO - not very specific, yet + if mont_present: + assert isinstance(read_dig_curry(fname), DigMontage) + else: + # TODO - not reached, yet. no test file without eeg chanlocs + with pytest.warns(RuntimeWarning, match="No sensor locations found"): + _ = read_dig_curry(fname)
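
Example usage of the reader API added in this series (a sketch; the path
"recording.cdt" is a placeholder, and any supported data suffix such as .dat
works the same way):

    from mne.io.curry import read_dig_curry, read_impedances_curry, read_raw_curry

    fname = "recording.cdt"  # placeholder path to a Curry data file
    # continuous data comes back as Raw; epoched files come back as Epochs
    # unless import_epochs_as_annotations=True is passed
    raw = read_raw_curry(fname, preload=True)
    montage = read_dig_curry(fname)  # DigMontage from the file's sensor positions
    ch_names, impedances = read_impedances_curry(fname)  # per-channel impedances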