Coverage for qubalab/images/labeled_server.py: 98%
48 statements
« prev ^ index » next coverage.py v7.6.10, created at 2025-01-31 11:24 +0000
import numpy as np
from typing import Iterable

import PIL
import PIL.Image
import PIL.ImageDraw
import shapely
import shapely.affinity
import shapely.geometry

from .image_server import ImageServer
from .metadata.image_metadata import ImageMetadata
from .metadata.image_shape import ImageShape
from .metadata.pixel_calibration import PixelCalibration, PixelLength
from .region_2d import Region2D
from ..objects.image_feature import ImageFeature
from ..objects.classification import Classification
from ..objects.draw import draw_geometry
class LabeledImageServer(ImageServer):
    """
    An ImageServer where pixel values are labels corresponding to image features (such as annotations)
    present on an image.

    The returned image will have one timepoint and one z-stack. The size of the remaining dimensions depend
    on the metadata provided when creating the server --- usually, the same as the ImageServer that the labeled image corresponds to.

    The image will only have one resolution level; the downsample for this level may be greater than or less than 1, and consequently region requests and downsamples should be considered relative to the metadata provided at server creation, **not** relative to the downsampled (or upsampled) LabeledImageServer coordinates.
    """

    def __init__(
        self,
        base_image_metadata: ImageMetadata,
        features: Iterable[ImageFeature],
        label_map: dict[Classification, int] | None = None,
        downsample: float | None = None,
        multichannel: bool = False,
        resize_method=PIL.Image.Resampling.NEAREST,
        **kwargs,
    ):
        """
        :param base_image_metadata: the metadata of the image containing the image features
        :param features: the image features to draw
        :param label_map: a dictionary mapping a classification to a label. The value of pixels where an image feature with
                          a certain classification is present will be taken from this dictionary. If not provided, each feature
                          will be assigned a unique integer. All labels must be greater than 0
        :param downsample: the downsample to apply to the image. Can be omitted to use the full resolution image
        :param multichannel: if False, the image returned by this server will have a single channel where pixel values will be unsigned
                             integers representing a label (see the label_map parameter). If True, the number of channels will be
                             equal to the highest label value + 1, and the pixel located at (c, y, x) is a boolean indicating if an annotation
                             with label c is present on the pixel located at (x, y)
        :param resize_method: the resampling method to use when resizing the image for downsampling. Nearest neighbour by default for labeled images.
        :raises ValueError: when a label in label_map is less than or equal to 0
        """
        super().__init__(resize_method=resize_method, **kwargs)

        # 0 is reserved for the background, so labels must be strictly positive
        if label_map is not None and any(label <= 0 for label in label_map.values()):
            raise ValueError(
                "A label in label_map is less than or equal to 0: " + str(label_map)
            )

        self._base_image_metadata = base_image_metadata
        self._downsample = 1 if downsample is None else downsample
        self._multichannel = multichannel
        # When a label map is provided, features whose classification is absent
        # from the map are not drawn at all
        self._features = [
            f for f in features if label_map is None or f.classification in label_map
        ]
        # Geometries are scaled once, up-front, into the (possibly downsampled)
        # coordinate space of this server, so region requests need only a translation
        self._geometries = [
            shapely.affinity.scale(
                shapely.geometry.shape(f.geometry),
                1 / self._downsample,
                1 / self._downsample,
                origin=(0, 0, 0),
            )
            for f in self._features
        ]
        # Spatial index used to quickly find the features intersecting a region request
        self._tree = shapely.STRtree(self._geometries)

        if label_map is None:
            # Assign each feature a unique label, starting at 1 (0 is the background)
            self._feature_index_to_label = {
                i: i + 1 for i in range(len(self._features))
            }
        else:
            self._feature_index_to_label = {
                i: label_map[self._features[i].classification]
                for i in range(len(self._features))
            }

    def close(self):
        # Nothing to release: this server holds no file handles or external resources
        pass

    def _build_metadata(self) -> ImageMetadata:
        """
        Build the metadata of this server: same width, height and pixel calibration as the
        base image, a single z-slice and timepoint, and a single resolution level whose
        downsample is the one given at creation.
        """
        return ImageMetadata(
            self._base_image_metadata.path,
            f"{self._base_image_metadata.name} - labels",
            (
                ImageShape(
                    int(self._base_image_metadata.width),
                    int(self._base_image_metadata.height),
                    1,
                    # One channel per possible label value (including the unused
                    # background channel 0) when multichannel, otherwise a single
                    # channel of integer labels
                    max(self._feature_index_to_label.values(), default=0) + 1
                    if self._multichannel
                    else 1,
                    1,
                ),
            ),
            PixelCalibration(
                PixelLength(
                    self._base_image_metadata.pixel_calibration.length_x.length,
                    self._base_image_metadata.pixel_calibration.length_x.unit,
                ),
                PixelLength(
                    self._base_image_metadata.pixel_calibration.length_y.length,
                    self._base_image_metadata.pixel_calibration.length_y.unit,
                ),
                self._base_image_metadata.pixel_calibration.length_z,
            ),
            False,
            bool if self._multichannel else np.uint32,
            downsamples=[self._downsample],
        )

    def _read_block(self, level: int, region: Region2D) -> np.ndarray:
        """
        Rasterize the features intersecting the requested region.

        :param level: the resolution level to read (ignored: this server has a single level)
        :param region: the region to read, in the (possibly downsampled) coordinates of this server
        :return: a (c, y, x) array: boolean per-label channels when multichannel, otherwise
                 a single channel of integer labels
        """
        # Indices (into self._features/self._geometries) of the features whose
        # bounding box intersects the requested region
        feature_indices = self._tree.query(region.geometry)

        if self._multichannel:
            full_image = np.zeros(
                (self.metadata.n_channels, region.height, region.width),
                dtype=self.metadata.dtype,
            )

            # Only rasterize labels actually present in the region: channels of
            # absent labels keep their zero initialization, so the output is the
            # same as if every label had been drawn
            for label in {self._feature_index_to_label[i] for i in feature_indices}:
                # 1-bit mask: a pixel is set if any feature with this label covers it
                image = PIL.Image.new("1", (region.width, region.height))
                drawing_context = PIL.ImageDraw.Draw(image)

                for i in feature_indices:
                    if label == self._feature_index_to_label[i]:
                        draw_geometry(
                            image.size,
                            drawing_context,
                            # Translate to region-local coordinates before drawing
                            shapely.affinity.translate(
                                self._geometries[i], -region.x, -region.y
                            ),
                            1,
                        )
                full_image[label, :, :] = np.asarray(image, dtype=self.metadata.dtype)

            return full_image
        else:
            # 32-bit integer image so label values are not clipped to 8 bits
            image = PIL.Image.new("I", (region.width, region.height))
            drawing_context = PIL.ImageDraw.Draw(image)
            for i in feature_indices:
                draw_geometry(
                    image.size,
                    drawing_context,
                    shapely.affinity.translate(
                        self._geometries[i], -region.x, -region.y
                    ),
                    self._feature_index_to_label[i],
                )
            return np.expand_dims(np.asarray(image, dtype=self.metadata.dtype), axis=0)