Commit e761501
Author: Thinh Nguyen
Commit message: improve variable naming in kilosort reader
1 parent: c002646


element_array_ephys/readers/kilosort.py (33 additions, 33 deletions)
@@ -36,21 +36,21 @@ class Kilosort:
     ]
 
     # keys to self.files, .data are file name e.g. self.data['params'], etc.
-    ks_keys = [path.splitext(i)[0] for i in ks_files]
+    ks_keys = [path.splitext(ks_file)[0] for ks_file in ks_files]
 
     def __init__(self, ks_dir):
         self._ks_dir = pathlib.Path(ks_dir)
         self._files = {}
         self._data = None
         self._clusters = None
 
-        params_fp = ks_dir / 'params.py'
+        params_filepath = ks_dir / 'params.py'
 
-        if not params_fp.exists():
+        if not params_filepath.exists():
             raise FileNotFoundError(f'No Kilosort output found in: {ks_dir}')
 
-        self._info = {'time_created': datetime.fromtimestamp(params_fp.stat().st_ctime),
-                      'time_modified': datetime.fromtimestamp(params_fp.stat().st_mtime)}
+        self._info = {'time_created': datetime.fromtimestamp(params_filepath.stat().st_ctime),
+                      'time_modified': datetime.fromtimestamp(params_filepath.stat().st_mtime)}
 
     @property
     def data(self):
@@ -64,29 +64,29 @@ def info(self):
 
     def _stat(self):
         self._data = {}
-        for i in Kilosort.ks_files:
-            f = self._ks_dir / i
+        for ks_filename in Kilosort.ks_files:
+            ks_filepath = self._ks_dir / ks_filename
 
-            if not f.exists():
-                log.debug('skipping {} - doesnt exist'.format(f))
+            if not ks_filepath.exists():
+                log.debug('skipping {} - does not exist'.format(ks_filepath))
                 continue
 
-            base, ext = path.splitext(i)
-            self._files[base] = f
+            base, ext = path.splitext(ks_filename)
+            self._files[base] = ks_filepath
 
-            if i == 'params.py':
-                log.debug('loading params.py {}'.format(f))
+            if ks_filename == 'params.py':
+                log.debug('loading params.py {}'.format(ks_filepath))
                 # params.py is a 'key = val' file
-                prm = {}
-                for line in open(f, 'r').readlines():
+                params = {}
+                for line in open(ks_filepath, 'r').readlines():
                     k, v = line.strip('\n').split('=')
-                    prm[k.strip()] = convert_to_number(v.strip())
-                log.debug('prm: {}'.format(prm))
-                self._data[base] = prm
+                    params[k.strip()] = convert_to_number(v.strip())
+                log.debug('params: {}'.format(params))
+                self._data[base] = params
 
             if ext == '.npy':
-                log.debug('loading npy {}'.format(f))
-                d = np.load(f, mmap_mode='r', allow_pickle=False, fix_imports=False)
+                log.debug('loading npy {}'.format(ks_filepath))
+                d = np.load(ks_filepath, mmap_mode='r', allow_pickle=False, fix_imports=False)
                 self._data[base] = (np.reshape(d, d.shape[0])
                                     if d.ndim == 2 and d.shape[1] == 1 else d)
 
@@ -105,11 +105,11 @@ def _stat(self):
     def get_best_channel(self, unit):
         template_idx = self.data['spike_templates'][
             np.where(self.data['spike_clusters'] == unit)[0][0]]
-        chn_templates = self.data['templates'][template_idx, :, :]
-        max_chn_idx = np.abs(np.abs(chn_templates).max(axis=0)).argmax()
-        max_chn = self.data['channel_map'][max_chn_idx]
+        channel_templates = self.data['templates'][template_idx, :, :]
+        max_channel_idx = np.abs(np.abs(channel_templates).max(axis=0)).argmax()
+        max_channel = self.data['channel_map'][max_channel_idx]
 
-        return max_chn, max_chn_idx
+        return max_channel, max_channel_idx
 
     def extract_spike_depths(self):
         """ Reimplemented from https://github.com/cortex-lab/spikes/blob/master/analysis/ksDriftmap.m """
@@ -138,9 +138,9 @@ def extract_clustering_info(cluster_output_dir):
 
    phy_curation_indicators = ['Merge clusters', 'Split cluster', 'Change metadata_group']
    # ---- Manual curation? ----
-    phylog_fp = cluster_output_dir / 'phy.log'
-    if phylog_fp.exists():
-        phylog = pd.read_fwf(phylog_fp, colspecs=[(6, 40), (41, 250)])
+    phylog_filepath = cluster_output_dir / 'phy.log'
+    if phylog_filepath.exists():
+        phylog = pd.read_fwf(phylog_filepath, colspecs=[(6, 40), (41, 250)])
         phylog.columns = ['meta', 'detail']
         curation_row = [bool(re.match('|'.join(phy_curation_indicators), str(s)))
                         for s in phylog.detail]
@@ -151,7 +151,7 @@ def extract_clustering_info(cluster_output_dir):
             if datetime_str:
                 creation_time = datetime.strptime(datetime_str.group(), '%Y-%m-%d %H:%M:%S')
             else:
-                creation_time = datetime.fromtimestamp(phylog_fp.stat().st_ctime)
+                creation_time = datetime.fromtimestamp(phylog_filepath.stat().st_ctime)
                 time_str = re.search('\d{2}:\d{2}:\d{2}', row_meta)
                 if time_str:
                     creation_time = datetime.combine(
@@ -161,16 +161,16 @@ def extract_clustering_info(cluster_output_dir):
         is_curated = False
 
     # ---- Quality control? ----
-    metric_fp = cluster_output_dir / 'metrics.csv'
-    if metric_fp.exists():
+    metric_filepath = cluster_output_dir / 'metrics.csv'
+    if metric_filepath.exists():
         is_qc = True
         if creation_time is None:
-            creation_time = datetime.fromtimestamp(metric_fp.stat().st_ctime)
+            creation_time = datetime.fromtimestamp(metric_filepath.stat().st_ctime)
     else:
         is_qc = False
 
     if creation_time is None:
-        spk_fp = next(cluster_output_dir.glob('spike_times.npy'))
-        creation_time = datetime.fromtimestamp(spk_fp.stat().st_ctime)
+        spiketimes_filepath = next(cluster_output_dir.glob('spike_times.npy'))
+        creation_time = datetime.fromtimestamp(spiketimes_filepath.stat().st_ctime)
 
     return creation_time, is_curated, is_qc
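
For context, a minimal usage sketch of the reader touched by this commit. This is not part of the diff; the import path is assumed from the file location, and the output directory below is a hypothetical example.

import pathlib

# Assumed import path based on element_array_ephys/readers/kilosort.py
from element_array_ephys.readers.kilosort import Kilosort, extract_clustering_info

# Hypothetical Kilosort output directory containing params.py, spike_times.npy, etc.
ks_dir = pathlib.Path('/data/session01/kilosort_output')

ks = Kilosort(ks_dir)
print(ks.data['params'])          # params.py parsed as a 'key = value' dict

# Peak-amplitude channel (and its index into channel_map) for a given cluster id
max_channel, max_channel_idx = ks.get_best_channel(unit=0)

# Creation time plus curation/QC flags inferred from phy.log and metrics.csv
creation_time, is_curated, is_qc = extract_clustering_info(ks_dir)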
