@@ -243,11 +243,133 @@ def make(self, key):
                                      f' not yet implemented')
+@schema
+class PreClusterMethod(dj.Lookup):
+    definition = """
+    # Method for pre-clustering
+    precluster_method: varchar(16)
+    ---
+    precluster_method_desc: varchar(1000)
+    """
+
+    contents = [('catgt', 'Time shift, Common average referencing, Zeroing')]
+
+
+@schema
+class PreClusterParamSet(dj.Lookup):
+    definition = """
+    # Parameter set to be used in a pre-clustering procedure
+    paramset_idx: smallint
+    ---
+    -> PreClusterMethod
+    paramset_desc: varchar(128)
+    param_set_hash: uuid
+    unique index (param_set_hash)
+    params: longblob  # dictionary of all applicable parameters
+    """
+
+    @classmethod
+    def insert_new_params(cls, precluster_method: str, paramset_idx: int,
+                          paramset_desc: str, params: dict):
+        param_dict = {'precluster_method': precluster_method,
+                      'paramset_idx': paramset_idx,
+                      'paramset_desc': paramset_desc,
+                      'params': params,
+                      'param_set_hash': dict_to_uuid(params)}
+        param_query = cls & {'param_set_hash': param_dict['param_set_hash']}
+
+        if param_query:  # a param-set with these exact parameters already exists
+            existing_paramset_idx = param_query.fetch1('paramset_idx')
+            if existing_paramset_idx == paramset_idx:  # the existing set has the same paramset_idx: job done
+                return
+            else:  # same params under a different paramset_idx: likely human error
+                raise dj.DataJointError(
+                    'The specified param-set'
+                    ' already exists - paramset_idx: {}'.format(existing_paramset_idx))
+        else:
+            cls.insert1(param_dict)
+
+
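For illustration, a pre-clustering parameter set could be registered through the helper above roughly as follows (a sketch; the CatGT flags and the paramset_idx shown are hypothetical choices, not values shipped with the pipeline):

    # Hypothetical example: register a CatGT parameter set.
    PreClusterParamSet.insert_new_params(
        precluster_method='catgt',
        paramset_idx=0,
        paramset_desc='CatGT: time shift + global common average referencing',
        params={'tshift': True, 'gblcar': True})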
+@schema
+class PreClusterParamList(dj.Lookup):
+    definition = """
+    precluster_param_list_id: smallint  # unique id for each ordered list of paramset_idx to be run
+    ---
+    order_id=null: smallint  # order of operations
+    -> [nullable] PreClusterParamSet
+    """
+    contents = [(0, 0, None)]  # nullable secondary attributes cover the case where pre-clustering is not performed
+
+
+@schema
+class PreClusterTask(dj.Manual):
+    definition = """
+    # Manual table for defining a pre-clustering task ready to be run
+    -> EphysRecording
+    -> PreClusterParamList
+    ---
+    precluster_output_dir: varchar(255)  # pre-clustering output directory relative to the root data directory
+    task_mode='none': enum('none', 'load', 'trigger')  # 'none': no pre-clustering analysis
+                                                       # 'load': load pre-computed analysis results
+                                                       # 'trigger': trigger the computation
+    """
+
+
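A pre-clustering task could then be queued roughly as follows (a sketch; `session_key`, the insertion number, and the output path are hypothetical, and `task_mode='none'` skips pre-clustering entirely):

    # Hypothetical example: queue a task that skips pre-clustering for one recording.
    PreClusterTask.insert1({**session_key,
                            'insertion_number': 0,
                            'precluster_param_list_id': 0,
                            'precluster_output_dir': 'subject1/session1/probe0',
                            'task_mode': 'none'})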
+@schema
+class PreCluster(dj.Imported):
+    """
+    A processing table to handle each PreClusterTask:
+    + If `task_mode == "none"`: no pre-clustering is performed
+    + If `task_mode == "trigger"`: trigger the pre-clustering analysis according to the
+      PreClusterParamSet
+    + If `task_mode == "load"`: verify the output
+    """
+    definition = """
+    # Pre-clustering procedure
+    -> PreClusterTask
+    ---
+    precluster_time: datetime  # time of generation of this set of pre-clustering results
+    package_version='': varchar(16)
+    """
+
+    def make(self, key):
+        task_mode, output_dir = (PreClusterTask & key).fetch1('task_mode',
+                                                              'precluster_output_dir')
+        precluster_output_dir = find_full_path(get_ephys_root_data_dir(), output_dir)
+
+        if task_mode == 'none':
+            creation_time = (EphysRecording & key).fetch1('recording_datetime')
+        elif task_mode == 'load':
+            acq_software = (EphysRecording & key).fetch1('acq_software')
+            inserted_probe_serial_number = (ProbeInsertion * probe.Probe & key).fetch1('probe')
+
+            if acq_software == 'SpikeGLX':
+                for meta_filepath in precluster_output_dir.rglob('*.ap.meta'):
+                    spikeglx_meta = spikeglx.SpikeGLXMeta(meta_filepath)
+
+                    if str(spikeglx_meta.probe_SN) == inserted_probe_serial_number:
+                        creation_time = spikeglx_meta.recording_time
+                        break
+                else:
+                    raise FileNotFoundError(
+                        'No SpikeGLX data found for probe insertion: {}'.format(key))
+            else:
+                raise NotImplementedError(f'Pre-clustering analysis of {acq_software}'
+                                          ' is not yet supported.')
+        elif task_mode == 'trigger':
+            raise NotImplementedError('Automatic triggering of'
+                                      ' pre-clustering analysis is not yet supported.')
+        else:
+            raise ValueError(f'Unknown task mode: {task_mode}')
+
+        self.insert1({**key, 'precluster_time': creation_time})
+
+
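Pending tasks are then processed with the standard DataJoint autopopulate call; a sketch:

    # Process all PreClusterTask entries that do not yet have PreCluster results.
    PreCluster.populate(display_progress=True)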
@schema
class LFP(dj.Imported):
    definition = """
    # Acquired local field potential (LFP) from a given Ephys recording.
-    -> EphysRecording
+    -> PreCluster
    ---
    lfp_sampling_rate: float  # (Hz)
    lfp_time_stamps: longblob  # (s) timestamps with respect to the start of the recording (recording_timestamp)
@@ -407,7 +529,7 @@ class ClusterQualityLabel(dj.Lookup):
class ClusteringTask(dj.Manual):
    definition = """
    # Manual table for defining a clustering task ready to be run
-    -> EphysRecording
+    -> PreCluster
    -> ClusteringParamSet
    ---
    clustering_output_dir: varchar(255)  # clustering output directory relative to the clustering root data directory