Commit 0b7884d

Merge branch 'main' of https://github.com/computational-cell-analytics/synaptic-reconstruction into more-inner-ear-analysis

2 parents: 903e59e + ef44f99

20 files changed: +1633 / -115 lines

environment.yaml

Lines changed: 4 additions & 0 deletions

@@ -8,5 +8,9 @@ dependencies:
 - pip
 - pyqt
 - magicgui
+- pytorch
+- bioimageio.core
+- kornia
+- tensorboard
 - pip:
   - napari-skimage-regionprops
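
The four new conda dependencies (pytorch, bioimageio.core, kornia, tensorboard) back the training-related scripts touched in this commit. A minimal sanity check, assuming the updated environment is active; this snippet is illustrative and not part of the commit:

import torch
import bioimageio.core
import kornia

# Each import should succeed inside the updated environment; printing the
# versions confirms which builds conda resolved.
print("torch", torch.__version__)
print("bioimageio.core", bioimageio.core.__version__)
print("kornia", kornia.__version__)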

run_correction.sh

Lines changed: 0 additions & 1 deletion
This file was deleted.

scripts/aggregate_data_information.py

Lines changed: 451 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 66 additions & 0 deletions

import os
from glob import glob

import h5py
import numpy as np

from scipy.ndimage import binary_closing
from skimage.measure import label
from synaptic_reconstruction.ground_truth.shape_refinement import edge_filter
from tqdm import tqdm

ROOT = "/mnt/lustre-emmy-hdd/projects/nim00007/data/synaptic-reconstruction/cooper/20241102_TOMO_DATA_Imig2014/final_Imig2014_seg_autoComp"  # noqa

OUTPUT_AZ = "./boundary_az"


def filter_az(path):
    # Check if we have the output already.
    ds, fname = os.path.split(path)
    ds = os.path.basename(ds)
    out_path = os.path.join(OUTPUT_AZ, ds, fname)
    os.makedirs(os.path.join(OUTPUT_AZ, ds), exist_ok=True)
    if os.path.exists(out_path):
        return

    with h5py.File(path, "r") as f:
        raw = f["raw"][:]
        az = f["AZ/segment_from_AZmodel_v3"][:]
        vesicles = f["/vesicles/segment_from_combined_vesicles"][:]

    # Compute the sato filter of the raw data, smooth it afterwards.
    # This will highlight dark ridge-like structures, and so
    # will yield high values for the plasma membrane.
    hmap = edge_filter(raw, sigma=1.0, method="sato", per_slice=True, n_threads=8)

    # Filter the active zone by combining a bunch of things:
    # 1. Find a mask with high values in the ridge filter.
    threshold_hmap = 0.5
    az_filtered = hmap > threshold_hmap
    # 2. Intersect it with the active zone predictions.
    az_filtered = np.logical_and(az_filtered, az)
    # 3. Intersect it with the negative vesicle mask.
    az_filtered = np.logical_and(az_filtered, vesicles == 0)

    # Postprocessing of the filtered active zone:
    # 1. Apply connected components and only keep the largest component.
    az_filtered = label(az_filtered)
    ids, sizes = np.unique(az_filtered, return_counts=True)
    ids, sizes = ids[1:], sizes[1:]
    az_filtered = (az_filtered == ids[np.argmax(sizes)]).astype("uint8")
    # 2. Apply binary closing.
    az_filtered = np.logical_or(az_filtered, binary_closing(az_filtered, iterations=4)).astype("uint8")

    # Save the result.
    with h5py.File(out_path, "a") as f:
        f.create_dataset("filtered_az", data=az_filtered, compression="gzip")


def main():
    files = sorted(glob(os.path.join(ROOT, "**/*.h5"), recursive=True))
    for ff in tqdm(files):
        filter_az(ff)


if __name__ == "__main__":
    main()
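
The post-processing in filter_az keeps the largest connected component and then applies a binary closing, merging the closed mask back into the component so that no original foreground is lost. The toy example below isolates that step with the same scipy/skimage calls; the 2D array and sizes are made up for illustration and are not part of the commit:

import numpy as np
from scipy.ndimage import binary_closing
from skimage.measure import label

# Toy 2D mask: a small blob, plus a large blob containing a small hole.
mask = np.zeros((32, 32), dtype=bool)
mask[4:10, 4:10] = True      # small component
mask[15:30, 15:30] = True    # large component
mask[20:22, 20:22] = False   # hole inside the large component

# 1. Connected components; drop the background id, keep the largest component.
cc = label(mask)
ids, sizes = np.unique(cc, return_counts=True)
ids, sizes = ids[1:], sizes[1:]
largest = (cc == ids[np.argmax(sizes)]).astype("uint8")

# 2. Binary closing fills the hole; the logical_or keeps every original pixel
# even where the closing erodes the mask near the array border.
closed = np.logical_or(largest, binary_closing(largest, iterations=4)).astype("uint8")
print(largest.sum(), closed.sum())  # the closed mask gains the filled hole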

scripts/inner_ear/check_results.py

Lines changed: 7 additions & 6 deletions

@@ -8,7 +8,7 @@
 import numpy as np
 import pandas

-from synaptic_reconstruction.distance_measurements import create_object_distance_lines
+from synaptic_reconstruction.distance_measurements import create_object_distance_lines, load_distances
 from synaptic_reconstruction.file_utils import get_data_path
 from synaptic_reconstruction.tools.distance_measurement import _downsample

@@ -21,11 +21,12 @@
 def get_distance_visualization(
     tomo, segmentations, distance_paths, vesicle_ids, scale, return_mem_props=False
 ):
-    ribbon_lines, _ = create_object_distance_lines(distance_paths["ribbon"], seg_ids=vesicle_ids, scale=scale)
-    pd_lines, _ = create_object_distance_lines(distance_paths["PD"], seg_ids=vesicle_ids, scale=scale)
-    membrane_lines, mem_props = create_object_distance_lines(
-        distance_paths["membrane"], seg_ids=vesicle_ids, scale=scale
-    )
+    d, e1, e2, ids = load_distances(distance_paths["ribbon"])
+    ribbon_lines, _ = create_object_distance_lines(d, e1, e2, ids, filter_seg_ids=vesicle_ids, scale=scale)
+    d, e1, e2, ids = load_distances(distance_paths["PD"])
+    pd_lines, _ = create_object_distance_lines(d, e1, e2, ids, filter_seg_ids=vesicle_ids, scale=scale)
+    d, e1, e2, ids = load_distances(distance_paths["membrane"])
+    membrane_lines, mem_props = create_object_distance_lines(d, e1, e2, ids, filter_seg_ids=vesicle_ids, scale=scale)

     distance_lines = {
         "ribbon_distances": ribbon_lines,

scripts/inner_ear/training/train_structure_segmentation.py

Lines changed: 36 additions & 6 deletions

@@ -10,7 +10,22 @@
 from tqdm import tqdm

 ROOT = "/mnt/lustre-emmy-hdd/projects/nim00007/data/synaptic-reconstruction/moser/inner_ear_data"
+ROOT_OTHER_TOMOS = "/mnt/lustre-emmy-hdd/projects/nim00007/data/synaptic-reconstruction/moser/other_tomograms/"
+
 LABEL_KEY = "labels/inner_ear_structures"
+OTHER_NAMES = ["vesicle_pools", "tether", "rat"]
+
+
+def get_other_paths(name):
+    assert name in OTHER_NAMES, f"Invalid name {name}"
+    if name == "vesicle_pools":
+        folder = "01_vesicle_pools"
+    elif name == "tether":
+        folder = "02_tether"
+    else:
+        folder = "03_ratten_tomos"
+    paths = sorted(glob(os.path.join(ROOT_OTHER_TOMOS, folder, "*.h5")))
+    return paths


 def get_train_val_test_split(root):

@@ -34,8 +49,7 @@ def get_train_val_test_split(root):
     return train_tomos, val_tomos, test_tomos


-def preprocess_labels(tomograms):
-    structure_keys = ("ribbon", "PD", "membrane")
+def preprocess_labels(tomograms, structure_keys=("ribbon", "PD", "membrane")):
     nc = len(structure_keys)

     for tomo in tqdm(tomograms, desc="Preprocess labels"):

@@ -60,11 +74,11 @@ def noop(x):
     return x


-def train_inner_ear_structures(train_tomograms, val_tomograms):
+def train_inner_ear_structures(train_tomograms, val_tomograms, name):
     patch_shape = (64, 512, 512)
     sampler = MinForegroundSampler(min_fraction=0.05, p_reject=0.95)
     supervised_training(
-        name="inner_ear_structure_model",
+        name=name,
         train_paths=train_tomograms, val_paths=val_tomograms,
         label_key=LABEL_KEY, patch_shape=patch_shape, save_root=".",
         sampler=sampler, label_transform=noop, out_channels=3,

@@ -73,11 +87,27 @@ def train_inner_ear_structures(train_tomograms, val_tomograms):
     )


-def main():
+def training_v1():
+    train_tomograms, val_tomograms, _ = get_train_val_test_split(ROOT)
+    preprocess_labels(train_tomograms)
+    preprocess_labels(val_tomograms)
+    train_inner_ear_structures(train_tomograms, val_tomograms, name="inner_ear_structure_model")
+
+
+def training_v2():
     train_tomograms, val_tomograms, _ = get_train_val_test_split(ROOT)
     preprocess_labels(train_tomograms)
+    for name in OTHER_NAMES:
+        other_tomograms = get_other_paths(name)
+        preprocess_labels(other_tomograms, structure_keys=("ribbons", "presynapse", "membrane"))
+        train_tomograms.extend(other_tomograms)
     preprocess_labels(val_tomograms)
-    train_inner_ear_structures(train_tomograms, val_tomograms)
+    train_inner_ear_structures(train_tomograms, val_tomograms, name="inner_ear_structure_model_v2")
+
+
+def main():
+    # training_v1()
+    training_v2()


 if __name__ == "__main__":
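
preprocess_labels itself is not shown in this diff; its new structure_keys parameter and nc = len(structure_keys) suggest that per-structure masks are combined into a label volume with one channel per structure. The following is only a hypothetical sketch of that idea under assumed HDF5 key names, not the repository's implementation:

import h5py
import numpy as np

def stack_structure_labels(path, structure_keys=("ribbon", "PD", "membrane")):
    # Hypothetical: read one mask per structure (assumed to live under labels/<name>)
    # and stack them along a channel axis for training with out_channels=len(structure_keys).
    with h5py.File(path, "a") as f:
        masks = [f[f"labels/{key}"][:] > 0 for key in structure_keys]
        stacked = np.stack(masks, axis=0).astype("uint8")
        if "labels/inner_ear_structures" not in f:
            f.create_dataset("labels/inner_ear_structures", data=stacked, compression="gzip")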

setup.py

Lines changed: 6 additions & 4 deletions

@@ -8,13 +8,15 @@
     name="synaptic_reconstruction",
     packages=find_packages(exclude=["test"]),
     version=__version__,
-    author="Constantin Pape; Sarah Muth",
+    author="Constantin Pape; Sarah Muth; Luca Freckmann",
     url="https://github.com/computational-cell-analytics/synaptic_reconstruction",
     license="MIT",
     entry_points={
         "console_scripts": [
-            "sr_tools.correct_segmentation = synaptic_reconstruction.tools.segmentation_correction:main",
-            "sr_tools.measure_distances = synaptic_reconstruction.tools.distance_measurement:main",
-        ]
+            "synapse_net.run_segmentation = synaptic_reconstruction.tools.cli:segmentation_cli"
+        ],
+        "napari.manifest": [
+            "synaptic_reconstruction = synaptic_reconstruction:napari.yaml",
+        ],
     },
 )
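
After reinstalling the package, the renamed console script can be checked through the standard entry point metadata. A minimal sketch, assuming Python 3.10+ for the group keyword of importlib.metadata.entry_points; this is not part of the commit:

from importlib.metadata import entry_points

# Look for the console script declared in setup.py above.
for ep in entry_points(group="console_scripts"):
    if ep.name == "synapse_net.run_segmentation":
        print(ep.name, "->", ep.value)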
