@@ -36,21 +36,21 @@ class Kilosort:
     ]
 
     # keys to self.files, .data are file name e.g. self.data['params'], etc.
-    ks_keys = [path.splitext(i)[0] for i in ks_files]
+    ks_keys = [path.splitext(ks_file)[0] for ks_file in ks_files]
 
     def __init__(self, ks_dir):
         self._ks_dir = pathlib.Path(ks_dir)
         self._files = {}
         self._data = None
         self._clusters = None
 
-        params_fp = ks_dir / 'params.py'
+        params_filepath = ks_dir / 'params.py'
 
-        if not params_fp.exists():
+        if not params_filepath.exists():
             raise FileNotFoundError(f'No Kilosort output found in: {ks_dir}')
 
-        self._info = {'time_created': datetime.fromtimestamp(params_fp.stat().st_ctime),
-                      'time_modified': datetime.fromtimestamp(params_fp.stat().st_mtime)}
+        self._info = {'time_created': datetime.fromtimestamp(params_filepath.stat().st_ctime),
+                      'time_modified': datetime.fromtimestamp(params_filepath.stat().st_mtime)}
 
     @property
     def data(self):
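Aside: the renamed comprehension above builds `ks_keys` by stripping extensions from the output file names, so the loader can later be indexed by bare names such as `'params'` or `'spike_times'`. A minimal sketch of the idiom, using a hypothetical subset of `ks_files` rather than the full class attribute:

```python
from os import path

# Hypothetical subset of Kilosort output files, for illustration only.
ks_files = ['params.py', 'spike_times.npy', 'spike_clusters.npy']

# Same comprehension as in the diff: drop each extension to get a dict key.
ks_keys = [path.splitext(ks_file)[0] for ks_file in ks_files]
assert ks_keys == ['params', 'spike_times', 'spike_clusters']
```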
@@ -64,29 +64,29 @@ def info(self):
 
     def _stat(self):
         self._data = {}
-        for i in Kilosort.ks_files:
-            f = self._ks_dir / i
+        for ks_filename in Kilosort.ks_files:
+            ks_filepath = self._ks_dir / ks_filename
 
-            if not f.exists():
-                log.debug('skipping {} - doesnt exist'.format(f))
+            if not ks_filepath.exists():
+                log.debug('skipping {} - does not exist'.format(ks_filepath))
                 continue
 
-            base, ext = path.splitext(i)
-            self._files[base] = f
+            base, ext = path.splitext(ks_filename)
+            self._files[base] = ks_filepath
 
-            if i == 'params.py':
-                log.debug('loading params.py {}'.format(f))
+            if ks_filename == 'params.py':
+                log.debug('loading params.py {}'.format(ks_filepath))
                 # params.py is a 'key = val' file
-                prm = {}
-                for line in open(f, 'r').readlines():
+                params = {}
+                for line in open(ks_filepath, 'r').readlines():
                     k, v = line.strip('\n').split('=')
-                    prm[k.strip()] = convert_to_number(v.strip())
-                log.debug('prm: {}'.format(prm))
-                self._data[base] = prm
+                    params[k.strip()] = convert_to_number(v.strip())
+                log.debug('params: {}'.format(params))
+                self._data[base] = params
 
             if ext == '.npy':
-                log.debug('loading npy {}'.format(f))
-                d = np.load(f, mmap_mode='r', allow_pickle=False, fix_imports=False)
+                log.debug('loading npy {}'.format(ks_filepath))
+                d = np.load(ks_filepath, mmap_mode='r', allow_pickle=False, fix_imports=False)
                 self._data[base] = (np.reshape(d, d.shape[0])
                                     if d.ndim == 2 and d.shape[1] == 1 else d)
 
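Aside: the `params.py` branch above treats the file as plain `key = val` lines. A self-contained sketch of that parsing, with a stand-in for `convert_to_number` (the real helper is defined elsewhere in this module and may differ, e.g. in how it handles quoted strings):

```python
def convert_to_number(value):
    """Stand-in helper: try int, then float, else return the string unchanged."""
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            continue
    return value

params = {}
# Example lines in the 'key = val' format that params.py uses (hypothetical values).
for line in ["n_channels_dat = 385", "sample_rate = 30000.0", "hp_filtered = False"]:
    k, v = line.strip('\n').split('=')
    params[k.strip()] = convert_to_number(v.strip())

assert params == {'n_channels_dat': 385, 'sample_rate': 30000.0, 'hp_filtered': 'False'}
```

The `.npy` branch, by contrast, memory-maps each array (`mmap_mode='r'`), so large outputs are not read into RAM until they are actually sliced.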
@@ -105,11 +105,11 @@ def _stat(self):
     def get_best_channel(self, unit):
         template_idx = self.data['spike_templates'][
             np.where(self.data['spike_clusters'] == unit)[0][0]]
-        chn_templates = self.data['templates'][template_idx, :, :]
-        max_chn_idx = np.abs(np.abs(chn_templates).max(axis=0)).argmax()
-        max_chn = self.data['channel_map'][max_chn_idx]
+        channel_templates = self.data['templates'][template_idx, :, :]
+        max_channel_idx = np.abs(np.abs(channel_templates).max(axis=0)).argmax()
+        max_channel = self.data['channel_map'][max_channel_idx]
 
-        return max_chn, max_chn_idx
+        return max_channel, max_channel_idx
 
     def extract_spike_depths(self):
         """ Reimplemented from https://github.com/cortex-lab/spikes/blob/master/analysis/ksDriftmap.m """
@@ -138,9 +138,9 @@ def extract_clustering_info(cluster_output_dir):
 
     phy_curation_indicators = ['Merge clusters', 'Split cluster', 'Change metadata_group']
     # ---- Manual curation? ----
-    phylog_fp = cluster_output_dir / 'phy.log'
-    if phylog_fp.exists():
-        phylog = pd.read_fwf(phylog_fp, colspecs=[(6, 40), (41, 250)])
+    phylog_filepath = cluster_output_dir / 'phy.log'
+    if phylog_filepath.exists():
+        phylog = pd.read_fwf(phylog_filepath, colspecs=[(6, 40), (41, 250)])
         phylog.columns = ['meta', 'detail']
         curation_row = [bool(re.match('|'.join(phy_curation_indicators), str(s)))
                         for s in phylog.detail]
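Aside: after the fixed-width parse splits each `phy.log` line into `meta` and `detail` columns, curation actions are detected by matching `detail` against the indicator strings. A sketch of just that matching step, with hypothetical log entries standing in for `phylog.detail`:

```python
import re

phy_curation_indicators = ['Merge clusters', 'Split cluster', 'Change metadata_group']

# Hypothetical 'detail' values as pd.read_fwf might yield them from phy.log.
details = ['Merge clusters 12, 14 to 31', 'select 5', 'Change metadata_group to noise']

# re.match anchors at the start of the string, so only leading matches count.
curation_row = [bool(re.match('|'.join(phy_curation_indicators), str(s)))
                for s in details]
assert curation_row == [True, False, True]
```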
@@ -151,7 +151,7 @@ def extract_clustering_info(cluster_output_dir):
         if datetime_str:
             creation_time = datetime.strptime(datetime_str.group(), '%Y-%m-%d %H:%M:%S')
         else:
-            creation_time = datetime.fromtimestamp(phylog_fp.stat().st_ctime)
+            creation_time = datetime.fromtimestamp(phylog_filepath.stat().st_ctime)
         time_str = re.search('\d{2}:\d{2}:\d{2}', row_meta)
         if time_str:
             creation_time = datetime.combine(
@@ -161,16 +161,16 @@ def extract_clustering_info(cluster_output_dir):
         is_curated = False
 
     # ---- Quality control? ----
-    metric_fp = cluster_output_dir / 'metrics.csv'
-    if metric_fp.exists():
+    metric_filepath = cluster_output_dir / 'metrics.csv'
+    if metric_filepath.exists():
         is_qc = True
         if creation_time is None:
-            creation_time = datetime.fromtimestamp(metric_fp.stat().st_ctime)
+            creation_time = datetime.fromtimestamp(metric_filepath.stat().st_ctime)
     else:
         is_qc = False
 
     if creation_time is None:
-        spk_fp = next(cluster_output_dir.glob('spike_times.npy'))
-        creation_time = datetime.fromtimestamp(spk_fp.stat().st_ctime)
+        spiketimes_filepath = next(cluster_output_dir.glob('spike_times.npy'))
+        creation_time = datetime.fromtimestamp(spiketimes_filepath.stat().st_ctime)
 
     return creation_time, is_curated, is_qc
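Aside: taken together, the function resolves `creation_time` through a fallback chain: a timestamp parsed out of `phy.log` if present, else `phy.log`'s ctime, else `metrics.csv`'s ctime, else the ctime of `spike_times.npy`. A condensed sketch of the file-based part of that chain (it deliberately omits the log-parsing step, and the error message is an assumption):

```python
import pathlib
from datetime import datetime

def fallback_creation_time(cluster_output_dir: pathlib.Path) -> datetime:
    """Condensed illustration of the fallback order used above."""
    for name in ('phy.log', 'metrics.csv', 'spike_times.npy'):
        candidate = cluster_output_dir / name
        if candidate.exists():
            # File creation time stands in for clustering/curation time.
            return datetime.fromtimestamp(candidate.stat().st_ctime)
    raise FileNotFoundError(f'No clustering output found in: {cluster_output_dir}')
```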