-
Notifications
You must be signed in to change notification settings - Fork 21
Expand file tree
/
Copy pathmodel_instance_seg.py
More file actions
242 lines (190 loc) · 8.03 KB
/
model_instance_seg.py
File metadata and controls
242 lines (190 loc) · 8.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
from __future__ import division
from __future__ import print_function
import numpy as np
# from skimage.measure import marching_cubes
# from skimage.measure import mesh_surface_area
from skimage.measure import label
from skimage.measure import regionprops
from skimage.morphology import remove_small_objects
from skimage.segmentation import watershed
from skimage.transform import resize
from tifffile import imread
from napari_cellseg3d.utils import fill_list_in_between
from napari_cellseg3d.utils import sphericity_axis
# from napari_cellseg3d.utils import sphericity_volume_area
def binary_connected(
    volume, thres=0.5, thres_small=3, scale_factors=(1.0, 1.0, 1.0)
):
    r"""Convert binary foreground probability maps to instance masks via
    connected-component labeling.

    Args:
        volume (numpy.ndarray): foreground probability of shape :math:`(C, Z, Y, X)`.
        thres (float): threshold of foreground. Default: 0.5
        thres_small (int): size threshold of small objects to remove. Default: 3
        scale_factors (tuple): scale factors for resizing in :math:`(Z, Y, X)` order. Default: (1.0, 1.0, 1.0)

    Returns:
        numpy.ndarray: labeled instance mask, resized by ``scale_factors``
        when any factor differs from 1.0.
    """
    semantic = np.squeeze(volume)
    foreground = semantic > thres
    segm = label(foreground)
    segm = remove_small_objects(segm, thres_small)

    if not all(x == 1.0 for x in scale_factors):
        target_size = (
            int(semantic.shape[0] * scale_factors[0]),
            int(semantic.shape[1] * scale_factors[1]),
            int(semantic.shape[2] * scale_factors[2]),
        )
        # order=0 (nearest neighbor) keeps integer labels intact during resize
        segm = resize(
            segm,
            target_size,
            order=0,
            anti_aliasing=False,
            preserve_range=True,
        )
    return segm
def binary_watershed(
    volume,
    thres_seeding=0.9,
    thres_small=10,
    thres_objects=0.3,
    scale_factors=(1.0, 1.0, 1.0),
    rem_seed_thres=3,
):
    r"""Convert binary foreground probability maps to instance masks via
    watershed segmentation algorithm.

    Note:
        This function uses the `skimage.segmentation.watershed <https://github.com/scikit-image/scikit-image/blob/master/skimage/segmentation/_watershed.py#L89>`_
        function that converts the input image into ``np.float64`` data type for processing. Therefore please make sure enough memory is allocated when handling large arrays.

    Args:
        volume (numpy.ndarray): foreground probability of shape :math:`(C, Z, Y, X)`.
        thres_seeding (float): threshold for seeding. Default: 0.9
        thres_objects (float): threshold for foreground objects. Default: 0.3
        thres_small (int): size threshold of small objects removal. Default: 10
        scale_factors (tuple): scale factors for resizing in :math:`(Z, Y, X)` order. Default: (1.0, 1.0, 1.0)
        rem_seed_thres (int): threshold for small seeds removal. Default : 3

    Returns:
        numpy.ndarray: labeled instance mask, resized by ``scale_factors``
        when any factor differs from 1.0.
    """
    semantic = np.squeeze(volume)
    seed_map = semantic > thres_seeding
    foreground = semantic > thres_objects

    # Seed from the high-confidence voxels, dropping tiny seeds first
    seed = label(seed_map)
    seed = remove_small_objects(seed, rem_seed_thres)

    # Flood from seeds over the negated probability (basins at high confidence)
    segm = watershed(-semantic.astype(np.float64), seed, mask=foreground)
    segm = remove_small_objects(segm, thres_small)

    if not all(x == 1.0 for x in scale_factors):
        target_size = (
            int(semantic.shape[0] * scale_factors[0]),
            int(semantic.shape[1] * scale_factors[1]),
            int(semantic.shape[2] * scale_factors[2]),
        )
        # order=0 (nearest neighbor) keeps integer labels intact during resize
        segm = resize(
            segm,
            target_size,
            order=0,
            anti_aliasing=False,
            preserve_range=True,
        )
    return np.array(segm)
def clear_small_objects(image, threshold, is_file_path=False):
    """Calls skimage.remove_small_objects to remove small fragments that might be artifacts.

    Args:
        image: array containing the image
        threshold: size threshold for removal of objects in pixels. E.g. if 10, all objects smaller than 10 pixels as a whole will be removed.
        is_file_path: if True, will load the image from a file path directly. Default : False

    Returns:
        array: The image with small objects removed
    """
    if is_file_path:
        image = imread(image)

    labeled = label(image)
    cleaned = remove_small_objects(labeled, threshold)

    # An unchanged label sum means removal was a no-op
    if np.sum(labeled) == np.sum(cleaned):
        print("Warning : no objects were removed")

    # Input looked binary (max value 1): map the labels back to 0/1
    return to_semantic(cleaned) if np.amax(image) == 1 else cleaned
def to_instance(image, is_file_path=False):
    """Converts a **ground-truth** label to instance (unique id per object) labels. Does not remove small objects.

    Args:
        image: image or path to image
        is_file_path: if True, will consider ``image`` to be a string containing a path to a file, if not treats it as an image data array.

    Returns: resulting converted labels
    """
    if is_file_path:
        # NOTE(review): list wrapping adds a leading axis that binary_watershed
        # squeezes away — confirm this matches the non-file-path call shape
        image = [imread(image)]

    # TODO add params
    return binary_watershed(
        image, thres_seeding=0.3, thres_small=0, rem_seed_thres=0
    )
def to_semantic(image, is_file_path=False):
    """Converts a **ground-truth** label to semantic (binary 0/1) labels.

    Args:
        image: image or path to image
        is_file_path: if True, will consider ``image`` to be a string containing a path to a file, if not treats it as an image data array.

    Returns: resulting converted labels as a ``np.uint16`` array where every
        value >= 1 is set to 1 (values in (0, 1) are left as-is before the
        integer cast, matching the previous behavior).
    """
    if is_file_path:
        image = imread(image)
    # Work on a copy: the previous implementation mutated the caller's
    # array in place via ``image[image >= 1] = 1``.
    result = np.where(image >= 1, 1, image)
    return result.astype(np.uint16)
def volume_stats(volume_image):
    """Computes various statistics from instance labels and returns them in a dict.

    Currently provided :
        * "Volume": volume of each object
        * "Centroid": x,y,z centroid coordinates for each object
        * "Sphericity (axes)": sphericity computed from semi-minor and semi-major axes
        * "Image size": size of the image
        * "Total image volume": volume in pixels of the whole image
        * "Total object volume (pixels)": total labeled volume in pixels
        * "Filling ratio": ratio of labeled over total pixel volume
        * "Number objects": total number of unique labeled objects

    Args:
        volume_image: instance labels image

    Returns:
        dict: Statistics described above
    """
    properties = regionprops(volume_image)

    def sphericity(region):
        # FIXME better way ? inconsistent errors in region.axis_minor_length
        try:
            return sphericity_axis(
                region.axis_major_length * 0.5, region.axis_minor_length * 0.5
            )
        except ValueError:
            return np.nan

    sphericity_ax = [sphericity(region) for region in properties]
    volume = [region.area for region in properties]

    def fill(lst, n=len(properties) - 1):
        # pad scalar stats with "" so every dict column has the same length
        return fill_list_in_between(lst, n, "")

    # .size is the pixel count without flatten()'s full-array copy
    total_pixels = volume_image.size
    if total_pixels != 0:
        ratio = fill([np.sum(volume) / total_pixels])
    else:
        ratio = 0

    return {
        "Volume": volume,
        # NOTE(review): centroid[0] is the first array axis — for (Z, Y, X)
        # volumes that is z, not x; confirm the intended axis naming
        "Centroid x": [region.centroid[0] for region in properties],
        "Centroid y": [region.centroid[1] for region in properties],
        "Centroid z": [region.centroid[2] for region in properties],
        "Sphericity (axes)": sphericity_ax,
        "Image size": fill([volume_image.shape]),
        "Total image volume": fill([total_pixels]),
        "Total object volume (pixels)": fill([np.sum(volume)]),
        "Filling ratio": ratio,
        "Number objects": fill([len(properties)]),
    }