vkit.mechanism.distortion.photometric.color

# Copyright 2022 vkit-x Administrator. All Rights Reserved.
#
# This project (vkit-x/vkit) is dual-licensed under commercial and SSPL licenses.
#
# The commercial license gives you the full rights to create and distribute software
# on your own terms without any SSPL license obligations. For more information,
# please see the "LICENSE_COMMERCIAL.txt" file.
#
# This project is also available under Server Side Public License (SSPL).
# The SSPL licensing is ideal for use cases such as open source projects with
# SSPL distribution, student/academic purposes, hobby projects, internal research
# projects without external distribution, or other projects where all SSPL
# obligations can be met. For more information, please see the "LICENSE_SSPL.txt" file.
from typing import cast, Any, Optional, Mapping, Sequence

import attrs
import numpy as np
from numpy.random import Generator as RandomGenerator
import cv2 as cv

from vkit.element import Image, ImageMode
from ..interface import DistortionConfig, DistortionNopState, Distortion
from .opt import (
    extract_mat_from_image,
    clip_mat_back_to_uint8,
    handle_out_of_bound_and_dtype,
    generate_new_image,
    OutOfBoundBehavior,
)


def _mean_shift(
    image: Image,
    channels: Optional[Sequence[int]],
    delta: int,
    threshold: Optional[int],
    oob_behavior: OutOfBoundBehavior,
):
    if delta == 0:
        return image

    mat = extract_mat_from_image(image, np.int16, channels)

    if threshold is None:
        mat += delta
    else:
        # Shift only pixels that have not yet crossed the threshold:
        # values <= threshold when shifting up, values >= threshold when shifting down.
        if delta > 0:
            mask = (mat <= threshold)
        else:
            assert delta < 0
            mask = (threshold <= mat)
        mat[mask] += delta

    mat = handle_out_of_bound_and_dtype(mat, oob_behavior)
    return generate_new_image(image, mat, channels)


@attrs.define
class MeanShiftConfig(DistortionConfig):
    delta: int
    threshold: Optional[int] = None
    channels: Optional[Sequence[int]] = None
    oob_behavior: OutOfBoundBehavior = OutOfBoundBehavior.CLIP


def mean_shift_image(
    config: MeanShiftConfig,
    state: Optional[DistortionNopState[MeanShiftConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    return _mean_shift(
        image=image,
        channels=config.channels,
        delta=config.delta,
        threshold=config.threshold,
        oob_behavior=config.oob_behavior,
    )


mean_shift = Distortion(
    config_cls=MeanShiftConfig,
    state_cls=DistortionNopState[MeanShiftConfig],
    func_image=mean_shift_image,
)

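Usage sketch for the functional interface above (the `Distortion` wrapper exposes the same operation through the framework). `image` is assumed to be an existing `vkit.element.Image` loaded elsewhere; `mean_shift_image` ignores its `state` and `rng` arguments, so `None` is fine for both.

    # Shift every channel up by 40, but leave pixels already above 200 untouched.
    config = MeanShiftConfig(delta=40, threshold=200)
    shifted = mean_shift_image(config, None, image, None)
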
@attrs.define
class ColorShiftConfig(DistortionConfig):
    delta: int


def color_shift_image(
    config: ColorShiftConfig,
    state: Optional[DistortionNopState[ColorShiftConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    mode = image.mode
    if mode not in (ImageMode.HSV, ImageMode.HSL):
        # Convert to HSV so the hue channel can be shifted directly.
        image = image.to_hsv_image()

    image = _mean_shift(
        image=image,
        # Operate on the hue channel.
        channels=[0],
        delta=config.delta,
        threshold=None,
        oob_behavior=OutOfBoundBehavior.CYCLE,
    )

    if mode not in (ImageMode.HSV, ImageMode.HSL):
        image = image.to_target_mode_image(mode)

    return image


color_shift = Distortion(
    config_cls=ColorShiftConfig,
    state_cls=DistortionNopState[ColorShiftConfig],
    func_image=color_shift_image,
)

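Because hue is cyclic, the shift above uses `OutOfBoundBehavior.CYCLE`, so only a delta needs to be configured. A minimal sketch, again assuming an existing non-HSV/HSL `image` (it is converted to HSV and back internally):

    config = ColorShiftConfig(delta=30)
    hue_shifted = color_shift_image(config, None, image, None)
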
@attrs.define
class BrightnessShiftConfig(DistortionConfig):
    delta: int
    intermediate_image_mode: ImageMode = ImageMode.HSL


def brightness_shift_image(
    config: BrightnessShiftConfig,
    state: Optional[DistortionNopState[BrightnessShiftConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    mode = image.mode
    if mode not in (ImageMode.HSV, ImageMode.HSL):
        assert config.intermediate_image_mode in (ImageMode.HSV, ImageMode.HSL)
        image = image.to_target_mode_image(config.intermediate_image_mode)

    image = _mean_shift(
        image=image,
        # Operate on the lightness (HSL) / value (HSV) channel.
        channels=[2],
        delta=config.delta,
        threshold=None,
        oob_behavior=OutOfBoundBehavior.CLIP,
    )

    if mode not in (ImageMode.HSV, ImageMode.HSL):
        image = image.to_target_mode_image(mode)

    return image


brightness_shift = Distortion(
    config_cls=BrightnessShiftConfig,
    state_cls=DistortionNopState[BrightnessShiftConfig],
    func_image=brightness_shift_image,
)

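A sketch for brightness_shift, assuming an existing RGB `image`. The intermediate mode defaults to HSL; a negative delta darkens the image while a positive one brightens it.

    config = BrightnessShiftConfig(delta=-60, intermediate_image_mode=ImageMode.HSV)
    darkened = brightness_shift_image(config, None, image, None)
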
def _std_shift(
    image: Image,
    channels: Optional[Sequence[int]],
    scale: float,
    oob_behavior: OutOfBoundBehavior,
):
    mat = extract_mat_from_image(image, np.float32, channels)

    assert scale > 0
    if mat.ndim == 2:
        mean = np.mean(mat)
    elif mat.ndim == 3:
        mean = np.mean(mat.reshape(-1, mat.shape[-1]), axis=0)
    else:
        raise NotImplementedError()
    # Rescale about the (per-channel) mean: mean + scale * (mat - mean).
    # This multiplies the standard deviation by `scale` while keeping the mean fixed.
    mat = mat * scale - mean * (scale - 1)

    mat = handle_out_of_bound_and_dtype(mat, oob_behavior)
    return generate_new_image(image, mat, channels)


@attrs.define
class StdShiftConfig(DistortionConfig):
    scale: float
    channels: Optional[Sequence[int]] = None


def std_shift_image(
    config: StdShiftConfig,
    state: Optional[DistortionNopState[StdShiftConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    return _std_shift(
        image,
        config.channels,
        config.scale,
        OutOfBoundBehavior.CLIP,
    )


std_shift = Distortion(
    config_cls=StdShiftConfig,
    state_cls=DistortionNopState[StdShiftConfig],
    func_image=std_shift_image,
)

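Since values are rescaled about the mean, `scale > 1.0` stretches contrast and `scale < 1.0` flattens it, while the mean is preserved up to clipping. A sketch assuming an existing `image`:

    higher_contrast = std_shift_image(StdShiftConfig(scale=1.5), None, image, None)
    lower_contrast = std_shift_image(StdShiftConfig(scale=0.5), None, image, None)
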
@attrs.define
class BoundaryEqualizationConfig(DistortionConfig):
    channels: Optional[Sequence[int]] = None


def boundary_equalization_image(
    config: BoundaryEqualizationConfig,
    state: Optional[DistortionNopState[BoundaryEqualizationConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    mat = extract_mat_from_image(image, np.float32, config.channels)

    # Equalize each channel to [0, 255].
    if mat.ndim == 2:
        delta: np.ndarray = mat.max() - mat.min()
        if delta == 0.0:
            return image

        mat -= mat.min()
        mat *= 255.0 / delta

    elif mat.ndim == 3:
        flatten_mat = mat.reshape(-1, mat.shape[-1])
        val_min = flatten_mat.min(axis=0)
        val_max = flatten_mat.max(axis=0)
        delta = val_max - val_min

        mask = (delta > 0)
        if not mask.any():
            return image

        num_channels = mask.sum()
        masked_min = mat[:, :, mask].reshape(-1, num_channels).min(axis=0)
        mat[:, :, mask] -= masked_min
        mat[:, :, mask] *= 255.0 / delta[mask]

    else:
        raise NotImplementedError()

    mat = handle_out_of_bound_and_dtype(mat, OutOfBoundBehavior.CLIP)
    return generate_new_image(image, mat, config.channels)


boundary_equalization = Distortion(
    config_cls=BoundaryEqualizationConfig,
    state_cls=DistortionNopState[BoundaryEqualizationConfig],
    func_image=boundary_equalization_image,
)

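Boundary equalization linearly stretches each selected channel so that its minimum maps to 0 and its maximum to 255; constant channels are left unchanged. A sketch assuming an existing RGB `image`, restricted to the first two channels:

    config = BoundaryEqualizationConfig(channels=[0, 1])
    stretched = boundary_equalization_image(config, None, image, None)
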
@attrs.define
class HistogramEqualizationConfig(DistortionConfig):
    channels: Optional[Sequence[int]] = None


def histogram_equalization_image(
    config: HistogramEqualizationConfig,
    state: Optional[DistortionNopState[HistogramEqualizationConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    mat = extract_mat_from_image(image, np.uint8, config.channels)

    if mat.ndim == 2:
        channel_mats: Sequence[np.ndarray] = [mat]
    elif mat.ndim == 3:
        channel_mats: Sequence[np.ndarray] = np.dsplit(mat, mat.shape[-1])
    else:
        raise NotImplementedError()

    new_mats = [cv.equalizeHist(channel_mat) for channel_mat in channel_mats]

    if mat.ndim == 2:
        return attrs.evolve(image, mat=new_mats[0])
    elif mat.ndim == 3:
        return generate_new_image(image, np.dstack(new_mats), config.channels)
    else:
        raise NotImplementedError()


histogram_equalization = Distortion(
    config_cls=HistogramEqualizationConfig,
    state_cls=DistortionNopState[HistogramEqualizationConfig],
    func_image=histogram_equalization_image,
)

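Histogram equalization is delegated to OpenCV's `equalizeHist`, applied channel by channel. A sketch assuming an existing grayscale or RGB `image`:

    config = HistogramEqualizationConfig()
    equalized = histogram_equalization_image(config, None, image, None)
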
@attrs.define
class ComplementConfig(DistortionConfig):
    threshold: Optional[int] = None
    enable_threshold_lte: bool = False
    channels: Optional[Sequence[int]] = None


def complement_image(
    config: ComplementConfig,
    state: Optional[DistortionNopState[ComplementConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    mat = extract_mat_from_image(image, np.uint8, config.channels)

    if config.threshold is None:
        mat = 255 - mat
    else:
        assert 0 <= config.threshold <= 255
        # By default, invert only pixels >= threshold; with enable_threshold_lte,
        # invert only pixels <= threshold.
        if not config.enable_threshold_lte:
            mask = (config.threshold <= mat)
        else:
            mask = (mat <= config.threshold)
        mat[mask] = 255 - mat[mask]

    return generate_new_image(image, mat, config.channels)


complement = Distortion(
    config_cls=ComplementConfig,
    state_cls=DistortionNopState[ComplementConfig],
    func_image=complement_image,
)

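Without a threshold, complement inverts every value (`255 - v`); with a threshold, only the bright side (default) or the dark side (`enable_threshold_lte=True`) is inverted. A sketch assuming an existing `image`:

    # Invert only pixels with value >= 128.
    config = ComplementConfig(threshold=128)
    inverted = complement_image(config, None, image, None)
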
@attrs.define
class PosterizationConfig(DistortionConfig):
    num_bits: int
    channels: Optional[Sequence[int]] = None


def posterization_image(
    config: PosterizationConfig,
    state: Optional[DistortionNopState[PosterizationConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    assert 0 <= config.num_bits < 8

    if config.num_bits == 0:
        return image

    mat = extract_mat_from_image(image, np.uint8, config.channels)
    # Clear the lowest num_bits bits.
    mat = np.bitwise_and(mat, (0xFF >> config.num_bits) << config.num_bits)
    return generate_new_image(image, mat, config.channels)


posterization = Distortion(
    config_cls=PosterizationConfig,
    state_cls=DistortionNopState[PosterizationConfig],
    func_image=posterization_image,
)

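With `num_bits=4`, the lowest four bits are cleared, so every value is rounded down to a multiple of 16 (e.g. 137 becomes 128), leaving 16 distinct levels per channel. A sketch assuming an existing `image`:

    config = PosterizationConfig(num_bits=4)
    posterized = posterization_image(config, None, image, None)
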
@attrs.define
class ColorBalanceConfig(DistortionConfig):
    ratio: float


def color_balance_image(
    config: ColorBalanceConfig,
    state: Optional[DistortionNopState[ColorBalanceConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    if image.mode == ImageMode.GRAYSCALE:
        return image

    grayscale_like_image = image.to_grayscale_image().to_target_mode_image(image.mode)
    grayscale_like_mat = grayscale_like_image.mat.astype(np.float32)
    mat = image.mat.astype(np.float32)

    if image.mode in (ImageMode.HSV, ImageMode.HSL):
        channels = cast(Sequence[int], [1, 2])
        grayscale_like_mat = grayscale_like_mat[:, :, channels]
        mat = mat[:, :, channels]

    assert 0.0 <= config.ratio <= 1.0
    mat = (1 - config.ratio) * grayscale_like_mat + config.ratio * mat
    mat = clip_mat_back_to_uint8(mat)

    if image.mode in (ImageMode.HSV, ImageMode.HSL):
        return generate_new_image(image, mat, [1, 2])
    else:
        return attrs.evolve(image, mat=mat)


color_balance = Distortion(
    config_cls=ColorBalanceConfig,
    state_cls=DistortionNopState[ColorBalanceConfig],
    func_image=color_balance_image,
)

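`ratio` interpolates between a grayscale-like version of the image (`ratio=0.0`) and the original colors (`ratio=1.0`), so intermediate values desaturate the image. A sketch assuming an existing RGB `image`:

    config = ColorBalanceConfig(ratio=0.3)
    desaturated = color_balance_image(config, None, image, None)
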
@attrs.define
class ChannelPermutationConfig(DistortionConfig):
    _rng_state: Optional[Mapping[str, Any]] = None

    @property
    def supports_rng_state(self) -> bool:
        return True

    @property
    def rng_state(self) -> Optional[Mapping[str, Any]]:
        return self._rng_state

    @rng_state.setter
    def rng_state(self, val: Mapping[str, Any]):
        self._rng_state = val


def channel_permutation_image(
    config: ChannelPermutationConfig,
    state: Optional[DistortionNopState[ChannelPermutationConfig]],
    image: Image,
    rng: Optional[RandomGenerator],
):
    assert rng
    indices = rng.permutation(image.num_channels)
    mat = image.mat[:, :, indices]
    return attrs.evolve(image, mat=mat)


channel_permutation = Distortion(
    config_cls=ChannelPermutationConfig,
    state_cls=DistortionNopState[ChannelPermutationConfig],
    func_image=channel_permutation_image,
)
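
Channel permutation is the only distortion in this module that consumes the `rng` argument, so a numpy generator must be supplied (numpy is already imported as `np` above). A sketch assuming an existing multi-channel `image`:

    rng = np.random.default_rng(42)
    config = ChannelPermutationConfig()
    permuted = channel_permutation_image(config, None, image, rng)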