vkit.mechanism.distortion.geometric.camera

  1# Copyright 2022 vkit-x Administrator. All Rights Reserved.
  2#
  3# This project (vkit-x/vkit) is dual-licensed under commercial and SSPL licenses.
  4#
  5# The commercial license gives you the full rights to create and distribute software
  6# on your own terms without any SSPL license obligations. For more information,
  7# please see the "LICENSE_COMMERCIAL.txt" file.
  8#
  9# This project is also available under Server Side Public License (SSPL).
 10# The SSPL licensing is ideal for use cases such as open source projects with
 11# SSPL distribution, student/academic purposes, hobby projects, internal research
 12# projects without external distribution, or other projects where all SSPL
 13# obligations can be met. For more information, please see the "LICENSE_SSPL.txt" file.
 14from typing import (
 15    cast,
 16    Callable,
 17    Sequence,
 18    Optional,
 19    Tuple,
 20    Union,
 21    Iterable,
 22    TypeVar,
 23)
 24import math
 25
 26import attrs
 27import cv2 as cv
 28import numpy as np
 29from numpy.random import Generator as RandomGenerator
 30
 31from vkit.element import Point, PointList, PointTuple
 32from ..interface import DistortionConfig
 33from .grid_rendering.interface import (
 34    PointProjector,
 35    DistortionStateImageGridBased,
 36    DistortionImageGridBased,
 37)
 38from .grid_rendering.grid_creator import create_src_image_grid
 39
 40_T_CONFIG = TypeVar('_T_CONFIG', bound=DistortionConfig)
 41
 42
 43class Point2dTo3dStrategy:
 44
 45    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
 46        raise NotImplementedError()
 47
 48
@attrs.define
class CameraModelConfig:
    # Axis of rotation (right-hand rule); normalized by CameraModel if not unit length.
    rotation_unit_vec: Sequence[float]
    # Rotation angle in degrees; clipped to [-89, 89] by CameraModel.
    rotation_theta: float
    # Focal length in pixels. Auto-completed from the image shape when omitted.
    focal_length: Optional[float] = None
    # Intersection of the camera's z-axis with the image plane, given in world
    # coordinates with z = 0. Auto-completed from the image shape when omitted.
    principal_point: Optional[Sequence[float]] = None
    # Distance from the camera to the principal point along the camera z-axis.
    # Auto-completed (set equal to focal_length) when omitted.
    camera_distance: Optional[float] = None
 56
 57
 58class CameraModel:
 59
 60    @classmethod
 61    def prep_rotation_unit_vec(cls, rotation_unit_vec: Sequence[float]) -> np.ndarray:
 62        vec = np.asarray(rotation_unit_vec, dtype=np.float32)
 63        length = np.linalg.norm(vec)
 64        if length != 1.0:
 65            vec /= length
 66        return vec
 67
 68    @classmethod
 69    def prep_rotation_theta(cls, rotation_theta: float):
 70        return float(np.clip(rotation_theta, -89, 89) / 180 * np.pi)
 71
 72    @classmethod
 73    def prep_principal_point(cls, principal_point: Sequence[float]):
 74        principal_point = list(principal_point)
 75        if len(principal_point) == 2:
 76            principal_point.append(0)
 77        return np.asarray(principal_point, dtype=np.float32).reshape(-1, 1)
 78
 79    @classmethod
 80    def generate_rotation_vec(cls, rotation_unit_vec: np.ndarray, rotation_theta: float):
 81        # Defines how the object is rotated, following the right hand rule:
 82        # https://en.wikipedia.org/wiki/Right-hand_rule#Rotations
 83        #
 84        # Multiplication is for cv.Rodrigues, as explained in
 85        # https://stackoverflow.com/a/12977143
 86        return rotation_unit_vec * rotation_theta
 87
 88    @classmethod
 89    def generate_rotation_mat_and_translation_vec(
 90        cls,
 91        rotation_vec: np.ndarray,
 92        camera_distance: float,
 93        principal_point: np.ndarray,
 94    ):
 95        # rotation_mat is a 3x3 matrix [rotated-x, rotated-y, rotated-z]
 96        rotation_mat, _ = cv.Rodrigues(rotation_vec)
 97        rotation_mat = cast(np.ndarray, rotation_mat)
 98
 99        # Assume that image is place in the world coordinate in the way as followed:
100        # 1. top-left corner: (0, 0, *)
101        # 2. top-right corner: (width - 1, 0, *)
102        # 3. bottom-left corner: (0, height - 1, *)
103        # 4. bottom-right corner: (width - 1, height - 1, *)
104        #
105        # principal_point is defined to represent the intersection between axis-z
106        # of the camera coordinate and the image. principal_point is defined in the
107        # world coordinate with axis-z = 0. principal_point should be in position
108        # (0, 0, camera_distance) after transformation.
109        #
110        # "cc_" is for camera coordinate and "wc_" is for world coordinate.
111        cc_principal_point_vec = np.asarray(
112            [0, 0, camera_distance],
113            dtype=np.float32,
114        ).reshape(-1, 1)
115        wc_shifted_origin_vec = np.matmul(
116            # NOTE: The rotation matrix is orthogonal,
117            # hence it's inverse is equal to it's transpose.
118            rotation_mat.transpose(),
119            cc_principal_point_vec,
120        )
121        wc_shifted_principal_point_vec = wc_shifted_origin_vec - principal_point
122
123        translation_vec: np.ndarray = np.matmul(
124            rotation_mat,
125            wc_shifted_principal_point_vec.reshape(-1, 1),
126        )
127        return rotation_mat, translation_vec
128
129    @classmethod
130    def generate_translation_vec(
131        cls,
132        rotation_vec: np.ndarray,
133        camera_distance: float,
134        principal_point: np.ndarray,
135    ):
136        _, translation_vec = CameraModel.generate_rotation_mat_and_translation_vec(
137            rotation_vec,
138            camera_distance,
139            principal_point,
140        )
141        return translation_vec
142
143    @classmethod
144    def generate_extrinsic_mat(
145        cls,
146        rotation_unit_vec: np.ndarray,
147        rotation_theta: float,
148        camera_distance: float,
149        principal_point: np.ndarray,
150    ):
151        rotation_vec = CameraModel.generate_rotation_vec(rotation_unit_vec, rotation_theta)
152        rotation_mat, translation_vec = CameraModel.generate_rotation_mat_and_translation_vec(
153            rotation_vec,
154            camera_distance,
155            principal_point,
156        )
157        extrinsic_mat = np.hstack((rotation_mat, translation_vec.reshape((-1, 1))))
158        return extrinsic_mat
159
160    @classmethod
161    def generate_intrinsic_mat(cls, focal_length: float):
162        return np.asarray(
163            [
164                [focal_length, 0, 0],
165                [0, focal_length, 0],
166                [0, 0, 1],
167            ],
168            dtype=np.float32,
169        )
170
171    def __init__(self, config: CameraModelConfig):
172        assert config.focal_length
173        assert config.camera_distance
174        assert config.principal_point
175
176        rotation_unit_vec = self.prep_rotation_unit_vec(config.rotation_unit_vec)
177        rotation_theta = self.prep_rotation_theta(config.rotation_theta)
178        self.rotation_vec = self.generate_rotation_vec(rotation_unit_vec, rotation_theta)
179
180        principal_point = self.prep_principal_point(config.principal_point)
181        self.translation_vec = self.generate_translation_vec(
182            self.rotation_vec,
183            config.camera_distance,
184            principal_point,
185        )
186        self.intrinsic_mat = self.generate_intrinsic_mat(config.focal_length)
187
188    def project_np_points_from_3d_to_2d(self, np_3d_points: np.ndarray) -> np.ndarray:
189        camera_2d_points, _ = cv.projectPoints(
190            np_3d_points,
191            self.rotation_vec,
192            self.translation_vec,
193            self.intrinsic_mat,
194            np.zeros(5),
195        )
196        return camera_2d_points.reshape(-1, 2)
197
198
class CameraPointProjector(PointProjector):
    """Projects 2D source points through a camera model.

    Points are first lifted into 3D by the supplied strategy, then projected
    back onto the image plane by the camera model.
    """

    def __init__(
        self,
        point_2d_to_3d_strategy: Point2dTo3dStrategy,
        camera_model_config: CameraModelConfig,
    ):
        self.point_2d_to_3d_strategy = point_2d_to_3d_strategy
        self.camera_model = CameraModel(camera_model_config)

    def project_points(self, src_points: Union[PointList, PointTuple, Iterable[Point]]):
        """Project a collection of 2D points and return a PointTuple."""
        lifted_3d_points = self.point_2d_to_3d_strategy.generate_np_3d_points(
            PointTuple(src_points)
        )
        projected_2d_points = self.camera_model.project_np_points_from_3d_to_2d(
            lifted_3d_points
        )
        return PointTuple.from_np_array(projected_2d_points)

    def project_point(self, src_point: Point):
        """Project a single 2D point."""
        return self.project_points(PointTuple.from_point(src_point))[0]
216
217
class DistortionStateCameraOperation(DistortionStateImageGridBased[_T_CONFIG]):
    """Shared setup for camera-based distortion states: completes the camera
    config, builds the source image grid and the camera point projector."""

    @classmethod
    def complete_camera_model_config(
        cls,
        height: int,
        width: int,
        camera_model_config: CameraModelConfig,
    ):
        """Fill in the optional camera fields from the image shape.

        Returns the config untouched when already complete; otherwise returns
        a completed copy (the caller's config is never mutated).
        """
        if camera_model_config.principal_point \
                and camera_model_config.focal_length \
                and camera_model_config.camera_distance:
            return camera_model_config

        # Make a copy.
        camera_model_config = attrs.evolve(camera_model_config)

        if not camera_model_config.principal_point:
            # Points are (x, y) with x in [0, width) and y in [0, height)
            # (see the corner layout documented in CameraModel), so the image
            # center is (width // 2, height // 2). The previous default was
            # (height // 2, width // 2), which swapped the axes and
            # mis-centered the principal point for non-square images.
            camera_model_config.principal_point = [width // 2, height // 2]

        if not camera_model_config.focal_length \
                or not camera_model_config.camera_distance:
            # Default both to the larger image side.
            camera_model_config.focal_length = max(height, width)
            camera_model_config.camera_distance = camera_model_config.focal_length

        return camera_model_config

    def initialize_camera_operation(
        self,
        height: int,
        width: int,
        grid_size: int,
        point_2d_to_3d_strategy: Point2dTo3dStrategy,
        camera_model_config: CameraModelConfig,
    ):
        """Build the grid and projector, then hand off to the grid-based base."""
        src_image_grid = create_src_image_grid(height, width, grid_size)

        camera_model_config = self.complete_camera_model_config(
            height,
            width,
            camera_model_config,
        )
        point_projector = CameraPointProjector(
            point_2d_to_3d_strategy,
            camera_model_config,
        )

        self.initialize_image_grid_based(src_image_grid, point_projector)
266
267
@attrs.define
class CameraPlaneOnlyConfig(DistortionConfig):
    # Camera parameters; optional fields are auto-completed from the image shape.
    camera_model_config: CameraModelConfig
    # Passed to create_src_image_grid when building the source image grid.
    grid_size: int
272
273
class CameraPlaneOnlyPoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lifts 2D points onto the z = 0 plane (no surface deformation)."""

    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
        np_2d_points = points.to_smooth_np_array()
        num_points = np_2d_points.shape[0]
        # Every point keeps its (x, y) position and receives z = 0.
        zero_zs = np.zeros((num_points, 1), dtype=np.float32)
        return np.hstack((np_2d_points, zero_zs))
283
284
class CameraPlaneOnlyState(DistortionStateCameraOperation[CameraPlaneOnlyConfig]):
    """State for the plane-only (pure perspective) camera distortion."""

    @classmethod
    def weights_func(cls, norm_distances: np.ndarray, alpha: float):
        # Hyperbolic decay: 1 at distance 0, approaching 0 far away.
        return alpha / (norm_distances + alpha)

    def __init__(
        self,
        config: CameraPlaneOnlyConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraPlaneOnlyPoint2dTo3dStrategy()
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
305
306
# Perspective-only distortion: the image stays on the z = 0 plane and is
# viewed through the configured camera model.
camera_plane_only = DistortionImageGridBased(
    config_cls=CameraPlaneOnlyConfig,
    state_cls=CameraPlaneOnlyState,
)
311
312
@attrs.define
class CameraCubicCurveConfig(DistortionConfig):
    # Endpoint slope angles (degrees, clipped to [-80, 80]) of the cubic
    # curve at the start / end of the sweep; converted to slopes via tan.
    curve_alpha: float
    curve_beta: float
    # Clockwise, [0, 180]
    curve_direction: float
    # Multiplier applied to the curve's z offsets.
    curve_scale: float
    # Camera parameters; optional fields are auto-completed from the image shape.
    camera_model_config: CameraModelConfig
    # Passed to create_src_image_grid when building the source image grid.
    grid_size: int
322
323
class CameraCubicCurvePoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lifts 2D points onto a surface bent along a cubic curve.

    The image plane is swept along ``curve_direction``; each point's z offset
    is a cubic polynomial of its normalized position along that sweep, with
    endpoint slopes controlled by ``curve_alpha`` and ``curve_beta``.
    """

    def __init__(
        self,
        height: int,
        width: int,
        curve_alpha: float,
        curve_beta: float,
        curve_direction: float,
        curve_scale: float,
    ):
        # Plane area.
        self.height = height
        self.width = width

        # Curve endpoint slopes (angles clipped, then converted via tan).
        self.curve_alpha = math.tan(np.clip(curve_alpha, -80, 80) / 180 * np.pi)
        self.curve_beta = math.tan(np.clip(curve_beta, -80, 80) / 180 * np.pi)

        # Plane projection direction, in radians.
        self.curve_direction = (curve_direction % 180) / 180 * np.pi

        cos_theta = math.cos(self.curve_direction)
        sin_theta = math.sin(self.curve_direction)
        self.rotation_mat = np.asarray(
            [
                [cos_theta, sin_theta],
                [-sin_theta, cos_theta],
            ],
            dtype=np.float32,
        )

        # Rotate the four image corners to find the span of the image along
        # the projection direction.
        corners = np.asarray(
            [
                [0, 0],
                [self.width - 1, 0],
                [self.width - 1, self.height - 1],
                [0, self.height - 1],
            ],
            dtype=np.float32,
        )
        rotated_corners = np.matmul(self.rotation_mat, corners.transpose())
        self.plane_projection_min = rotated_corners[0].min()
        self.plane_projection_range = rotated_corners[0].max() - self.plane_projection_min

        self.curve_scale = curve_scale

    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
        np_2d_points = points.to_smooth_np_array()

        # Normalized [0, 1] position of every point along the sweep direction.
        rotated_points = np.matmul(self.rotation_mat, np_2d_points.transpose())
        ratios = (
            rotated_points[0] - self.plane_projection_min
        ) / self.plane_projection_range

        # Cubic p with p(0) = p(1) = 0, p'(0) = curve_alpha and
        # p'(1) = curve_beta (coefficients ordered highest power first).
        poly = np.asarray([
            self.curve_alpha + self.curve_beta,
            -2 * self.curve_alpha - self.curve_beta,
            self.curve_alpha,
            0,
        ])
        pos_zs = np.polyval(poly, ratios)
        pos_zs = pos_zs * self.plane_projection_range * self.curve_scale
        # Center the offsets so the surface's mean height is zero.
        pos_zs: np.ndarray = pos_zs - pos_zs.mean()

        return np.hstack((np_2d_points, pos_zs.reshape((-1, 1))))
399
400
class CameraCubicCurveState(DistortionStateCameraOperation[CameraCubicCurveConfig]):
    """State for the cubic-curve camera distortion."""

    def __init__(
        self,
        config: CameraCubicCurveConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraCubicCurvePoint2dTo3dStrategy(
            height,
            width,
            config.curve_alpha,
            config.curve_beta,
            config.curve_direction,
            config.curve_scale,
        )
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
425
# Distortion simulating the image bent along a cubic curve and viewed
# through the configured camera model.
camera_cubic_curve = DistortionImageGridBased(
    config_cls=CameraCubicCurveConfig,
    state_cls=CameraCubicCurveState,
)
430
431
class CameraPlaneLinePoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lifts 2D points off the plane near a line.

    Each point is displaced along ``perturb_vec``, scaled by a weight that
    decays with the point's distance to the configured line.
    """

    def __init__(
        self,
        height: int,
        width: int,
        point: Tuple[float, float],
        direction: float,
        perturb_vec: Tuple[float, float, float],
        alpha: float,
        weights_func: Callable[[np.ndarray, float], np.ndarray],
    ):
        # Plane area.
        self.height = height
        self.width = width

        # The line through `point` with the given clockwise direction, stored
        # as ax + by + c = 0 with the unit normal (a, b) = (sin, -cos).
        self.point = np.asarray(point, dtype=np.float32)
        direction = (direction % 180) / 180 * np.pi
        cos_theta = np.cos(direction)
        sin_theta = np.sin(direction)
        self.line_params_a_b = np.asarray([sin_theta, -cos_theta], dtype=np.float32)
        self.line_param_c = -self.point[0] * sin_theta + self.point[1] * cos_theta

        # For weight calculation: distances are normalized by the diagonal.
        self.distance_max = np.sqrt(height**2 + width**2)
        self.alpha = alpha
        self.weights_func = weights_func

        # Deformation vector.
        self.perturb_vec = np.asarray(perturb_vec, dtype=np.float32)

    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
        np_2d_points = points.to_smooth_np_array()

        # Per-point weight from the (normalized) distance to the line.
        # |ax + by + c| is the exact distance since (a, b) is unit length.
        distances = np.abs((np_2d_points * self.line_params_a_b).sum(axis=1) + self.line_param_c)
        norm_distances = distances / self.distance_max
        weights = self.weights_func(norm_distances, self.alpha)

        # Start on the z = 0 plane ...
        zero_zs = np.zeros((np_2d_points.shape[0], 1), dtype=np.float32)
        np_3d_points = np.hstack((np_2d_points, zero_zs))

        # ... then add the weighted perturbation, re-centered so the mean
        # displacement over all points is zero.
        np_perturb = weights.reshape(-1, 1) * self.perturb_vec
        np_perturb -= np_perturb.mean(axis=0)
        np_3d_points += np_perturb
        return np_3d_points
481
482
@attrs.define
class CameraPlaneLineFoldConfig(DistortionConfig):
    # A point (x, y) the fold line passes through.
    fold_point: Tuple[float, float]
    # Clockwise, [0, 180]
    fold_direction: float
    # 3D displacement applied near the fold line, scaled by a weight that
    # decays with distance from the line.
    fold_perturb_vec: Tuple[float, float, float]
    # Decay parameter: weight = alpha / (norm_distance + alpha).
    fold_alpha: float
    # Camera parameters; optional fields are auto-completed from the image shape.
    camera_model_config: CameraModelConfig
    # Passed to create_src_image_grid when building the source image grid.
    grid_size: int
492
493
class CameraPlaneLineFoldState(DistortionStateCameraOperation[CameraPlaneLineFoldConfig]):
    """State for the line-fold camera distortion."""

    @classmethod
    def weights_func(cls, norm_distances: np.ndarray, alpha: float):
        # Hyperbolic decay: 1 on the fold line, approaching 0 far away.
        return alpha / (norm_distances + alpha)

    def __init__(
        self,
        config: CameraPlaneLineFoldConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraPlaneLinePoint2dTo3dStrategy(
            height=height,
            width=width,
            point=config.fold_point,
            direction=config.fold_direction,
            perturb_vec=config.fold_perturb_vec,
            alpha=config.fold_alpha,
            weights_func=self.weights_func,
        )
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
522
523
# Distortion simulating a fold along a line: the perturbation decays with
# distance d from the line as alpha / (d + alpha).
camera_plane_line_fold = DistortionImageGridBased(
    config_cls=CameraPlaneLineFoldConfig,
    state_cls=CameraPlaneLineFoldState,
)
528
529
@attrs.define
class CameraPlaneLineCurveConfig(DistortionConfig):
    # A point (x, y) the curve line passes through.
    curve_point: Tuple[float, float]
    # Clockwise, [0, 180]
    curve_direction: float
    # 3D displacement applied near the line, scaled by a weight that decays
    # with distance from the line.
    curve_perturb_vec: Tuple[float, float, float]
    # Decay parameter: weight = 1 - norm_distance ** alpha.
    curve_alpha: float
    # Camera parameters; optional fields are auto-completed from the image shape.
    camera_model_config: CameraModelConfig
    # Passed to create_src_image_grid when building the source image grid.
    grid_size: int
539
540
class CameraPlaneLineCurveState(DistortionStateCameraOperation[CameraPlaneLineCurveConfig]):
    """State for the line-curve camera distortion."""

    @classmethod
    def weights_func(cls, norm_distances: np.ndarray, alpha: float):
        # Power-law falloff: 1 on the line, 0 at the maximum distance.
        return 1 - norm_distances**alpha

    def __init__(
        self,
        config: CameraPlaneLineCurveConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraPlaneLinePoint2dTo3dStrategy(
            height=height,
            width=width,
            point=config.curve_point,
            direction=config.curve_direction,
            perturb_vec=config.curve_perturb_vec,
            alpha=config.curve_alpha,
            weights_func=self.weights_func,
        )
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
569
570
# Distortion simulating a smooth curl around a line: the perturbation decays
# with normalized distance d from the line as 1 - d ** alpha.
camera_plane_line_curve = DistortionImageGridBased(
    config_cls=CameraPlaneLineCurveConfig,
    state_cls=CameraPlaneLineCurveState,
)
class Point2dTo3dStrategy:
44class Point2dTo3dStrategy:
45
46    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
47        raise NotImplementedError()
Point2dTo3dStrategy()
def generate_np_3d_points(self, points: vkit.element.point.PointTuple) -> numpy.ndarray:
46    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
47        raise NotImplementedError()
class CameraModelConfig:
51class CameraModelConfig:
52    rotation_unit_vec: Sequence[float]
53    rotation_theta: float
54    focal_length: Optional[float] = None
55    principal_point: Optional[Sequence[float]] = None
56    camera_distance: Optional[float] = None
CameraModelConfig( rotation_unit_vec: Sequence[float], rotation_theta: float, focal_length: Union[float, NoneType] = None, principal_point: Union[Sequence[float], NoneType] = None, camera_distance: Union[float, NoneType] = None)
2def __init__(self, rotation_unit_vec, rotation_theta, focal_length=attr_dict['focal_length'].default, principal_point=attr_dict['principal_point'].default, camera_distance=attr_dict['camera_distance'].default):
3    self.rotation_unit_vec = rotation_unit_vec
4    self.rotation_theta = rotation_theta
5    self.focal_length = focal_length
6    self.principal_point = principal_point
7    self.camera_distance = camera_distance

Method generated by attrs for class CameraModelConfig.

class CameraModel:
 59class CameraModel:
 60
 61    @classmethod
 62    def prep_rotation_unit_vec(cls, rotation_unit_vec: Sequence[float]) -> np.ndarray:
 63        vec = np.asarray(rotation_unit_vec, dtype=np.float32)
 64        length = np.linalg.norm(vec)
 65        if length != 1.0:
 66            vec /= length
 67        return vec
 68
 69    @classmethod
 70    def prep_rotation_theta(cls, rotation_theta: float):
 71        return float(np.clip(rotation_theta, -89, 89) / 180 * np.pi)
 72
 73    @classmethod
 74    def prep_principal_point(cls, principal_point: Sequence[float]):
 75        principal_point = list(principal_point)
 76        if len(principal_point) == 2:
 77            principal_point.append(0)
 78        return np.asarray(principal_point, dtype=np.float32).reshape(-1, 1)
 79
 80    @classmethod
 81    def generate_rotation_vec(cls, rotation_unit_vec: np.ndarray, rotation_theta: float):
 82        # Defines how the object is rotated, following the right hand rule:
 83        # https://en.wikipedia.org/wiki/Right-hand_rule#Rotations
 84        #
 85        # Multiplication is for cv.Rodrigues, as explained in
 86        # https://stackoverflow.com/a/12977143
 87        return rotation_unit_vec * rotation_theta
 88
 89    @classmethod
 90    def generate_rotation_mat_and_translation_vec(
 91        cls,
 92        rotation_vec: np.ndarray,
 93        camera_distance: float,
 94        principal_point: np.ndarray,
 95    ):
 96        # rotation_mat is a 3x3 matrix [rotated-x, rotated-y, rotated-z]
 97        rotation_mat, _ = cv.Rodrigues(rotation_vec)
 98        rotation_mat = cast(np.ndarray, rotation_mat)
 99
100        # Assume that image is place in the world coordinate in the way as followed:
101        # 1. top-left corner: (0, 0, *)
102        # 2. top-right corner: (width - 1, 0, *)
103        # 3. bottom-left corner: (0, height - 1, *)
104        # 4. bottom-right corner: (width - 1, height - 1, *)
105        #
106        # principal_point is defined to represent the intersection between axis-z
107        # of the camera coordinate and the image. principal_point is defined in the
108        # world coordinate with axis-z = 0. principal_point should be in position
109        # (0, 0, camera_distance) after transformation.
110        #
111        # "cc_" is for camera coordinate and "wc_" is for world coordinate.
112        cc_principal_point_vec = np.asarray(
113            [0, 0, camera_distance],
114            dtype=np.float32,
115        ).reshape(-1, 1)
116        wc_shifted_origin_vec = np.matmul(
117            # NOTE: The rotation matrix is orthogonal,
118            # hence it's inverse is equal to it's transpose.
119            rotation_mat.transpose(),
120            cc_principal_point_vec,
121        )
122        wc_shifted_principal_point_vec = wc_shifted_origin_vec - principal_point
123
124        translation_vec: np.ndarray = np.matmul(
125            rotation_mat,
126            wc_shifted_principal_point_vec.reshape(-1, 1),
127        )
128        return rotation_mat, translation_vec
129
130    @classmethod
131    def generate_translation_vec(
132        cls,
133        rotation_vec: np.ndarray,
134        camera_distance: float,
135        principal_point: np.ndarray,
136    ):
137        _, translation_vec = CameraModel.generate_rotation_mat_and_translation_vec(
138            rotation_vec,
139            camera_distance,
140            principal_point,
141        )
142        return translation_vec
143
144    @classmethod
145    def generate_extrinsic_mat(
146        cls,
147        rotation_unit_vec: np.ndarray,
148        rotation_theta: float,
149        camera_distance: float,
150        principal_point: np.ndarray,
151    ):
152        rotation_vec = CameraModel.generate_rotation_vec(rotation_unit_vec, rotation_theta)
153        rotation_mat, translation_vec = CameraModel.generate_rotation_mat_and_translation_vec(
154            rotation_vec,
155            camera_distance,
156            principal_point,
157        )
158        extrinsic_mat = np.hstack((rotation_mat, translation_vec.reshape((-1, 1))))
159        return extrinsic_mat
160
161    @classmethod
162    def generate_intrinsic_mat(cls, focal_length: float):
163        return np.asarray(
164            [
165                [focal_length, 0, 0],
166                [0, focal_length, 0],
167                [0, 0, 1],
168            ],
169            dtype=np.float32,
170        )
171
172    def __init__(self, config: CameraModelConfig):
173        assert config.focal_length
174        assert config.camera_distance
175        assert config.principal_point
176
177        rotation_unit_vec = self.prep_rotation_unit_vec(config.rotation_unit_vec)
178        rotation_theta = self.prep_rotation_theta(config.rotation_theta)
179        self.rotation_vec = self.generate_rotation_vec(rotation_unit_vec, rotation_theta)
180
181        principal_point = self.prep_principal_point(config.principal_point)
182        self.translation_vec = self.generate_translation_vec(
183            self.rotation_vec,
184            config.camera_distance,
185            principal_point,
186        )
187        self.intrinsic_mat = self.generate_intrinsic_mat(config.focal_length)
188
189    def project_np_points_from_3d_to_2d(self, np_3d_points: np.ndarray) -> np.ndarray:
190        camera_2d_points, _ = cv.projectPoints(
191            np_3d_points,
192            self.rotation_vec,
193            self.translation_vec,
194            self.intrinsic_mat,
195            np.zeros(5),
196        )
197        return camera_2d_points.reshape(-1, 2)
172    def __init__(self, config: CameraModelConfig):
173        assert config.focal_length
174        assert config.camera_distance
175        assert config.principal_point
176
177        rotation_unit_vec = self.prep_rotation_unit_vec(config.rotation_unit_vec)
178        rotation_theta = self.prep_rotation_theta(config.rotation_theta)
179        self.rotation_vec = self.generate_rotation_vec(rotation_unit_vec, rotation_theta)
180
181        principal_point = self.prep_principal_point(config.principal_point)
182        self.translation_vec = self.generate_translation_vec(
183            self.rotation_vec,
184            config.camera_distance,
185            principal_point,
186        )
187        self.intrinsic_mat = self.generate_intrinsic_mat(config.focal_length)
@classmethod
def prep_rotation_unit_vec(cls, rotation_unit_vec: Sequence[float]) -> numpy.ndarray:
61    @classmethod
62    def prep_rotation_unit_vec(cls, rotation_unit_vec: Sequence[float]) -> np.ndarray:
63        vec = np.asarray(rotation_unit_vec, dtype=np.float32)
64        length = np.linalg.norm(vec)
65        if length != 1.0:
66            vec /= length
67        return vec
@classmethod
def prep_rotation_theta(cls, rotation_theta: float):
69    @classmethod
70    def prep_rotation_theta(cls, rotation_theta: float):
71        return float(np.clip(rotation_theta, -89, 89) / 180 * np.pi)
@classmethod
def prep_principal_point(cls, principal_point: Sequence[float]):
73    @classmethod
74    def prep_principal_point(cls, principal_point: Sequence[float]):
75        principal_point = list(principal_point)
76        if len(principal_point) == 2:
77            principal_point.append(0)
78        return np.asarray(principal_point, dtype=np.float32).reshape(-1, 1)
@classmethod
def generate_rotation_vec(cls, rotation_unit_vec: numpy.ndarray, rotation_theta: float):
80    @classmethod
81    def generate_rotation_vec(cls, rotation_unit_vec: np.ndarray, rotation_theta: float):
82        # Defines how the object is rotated, following the right hand rule:
83        # https://en.wikipedia.org/wiki/Right-hand_rule#Rotations
84        #
85        # Multiplication is for cv.Rodrigues, as explained in
86        # https://stackoverflow.com/a/12977143
87        return rotation_unit_vec * rotation_theta
@classmethod
def generate_rotation_mat_and_translation_vec( cls, rotation_vec: numpy.ndarray, camera_distance: float, principal_point: numpy.ndarray):
 89    @classmethod
 90    def generate_rotation_mat_and_translation_vec(
 91        cls,
 92        rotation_vec: np.ndarray,
 93        camera_distance: float,
 94        principal_point: np.ndarray,
 95    ):
 96        # rotation_mat is a 3x3 matrix [rotated-x, rotated-y, rotated-z]
 97        rotation_mat, _ = cv.Rodrigues(rotation_vec)
 98        rotation_mat = cast(np.ndarray, rotation_mat)
 99
100        # Assume that image is place in the world coordinate in the way as followed:
101        # 1. top-left corner: (0, 0, *)
102        # 2. top-right corner: (width - 1, 0, *)
103        # 3. bottom-left corner: (0, height - 1, *)
104        # 4. bottom-right corner: (width - 1, height - 1, *)
105        #
106        # principal_point is defined to represent the intersection between axis-z
107        # of the camera coordinate and the image. principal_point is defined in the
108        # world coordinate with axis-z = 0. principal_point should be in position
109        # (0, 0, camera_distance) after transformation.
110        #
111        # "cc_" is for camera coordinate and "wc_" is for world coordinate.
112        cc_principal_point_vec = np.asarray(
113            [0, 0, camera_distance],
114            dtype=np.float32,
115        ).reshape(-1, 1)
116        wc_shifted_origin_vec = np.matmul(
117            # NOTE: The rotation matrix is orthogonal,
118            # hence it's inverse is equal to it's transpose.
119            rotation_mat.transpose(),
120            cc_principal_point_vec,
121        )
122        wc_shifted_principal_point_vec = wc_shifted_origin_vec - principal_point
123
124        translation_vec: np.ndarray = np.matmul(
125            rotation_mat,
126            wc_shifted_principal_point_vec.reshape(-1, 1),
127        )
128        return rotation_mat, translation_vec
@classmethod
def generate_translation_vec( cls, rotation_vec: numpy.ndarray, camera_distance: float, principal_point: numpy.ndarray):
130    @classmethod
131    def generate_translation_vec(
132        cls,
133        rotation_vec: np.ndarray,
134        camera_distance: float,
135        principal_point: np.ndarray,
136    ):
137        _, translation_vec = CameraModel.generate_rotation_mat_and_translation_vec(
138            rotation_vec,
139            camera_distance,
140            principal_point,
141        )
142        return translation_vec
@classmethod
def generate_extrinsic_mat( cls, rotation_unit_vec: numpy.ndarray, rotation_theta: float, camera_distance: float, principal_point: numpy.ndarray):
144    @classmethod
145    def generate_extrinsic_mat(
146        cls,
147        rotation_unit_vec: np.ndarray,
148        rotation_theta: float,
149        camera_distance: float,
150        principal_point: np.ndarray,
151    ):
152        rotation_vec = CameraModel.generate_rotation_vec(rotation_unit_vec, rotation_theta)
153        rotation_mat, translation_vec = CameraModel.generate_rotation_mat_and_translation_vec(
154            rotation_vec,
155            camera_distance,
156            principal_point,
157        )
158        extrinsic_mat = np.hstack((rotation_mat, translation_vec.reshape((-1, 1))))
159        return extrinsic_mat
@classmethod
def generate_intrinsic_mat(cls, focal_length: float):
161    @classmethod
162    def generate_intrinsic_mat(cls, focal_length: float):
163        return np.asarray(
164            [
165                [focal_length, 0, 0],
166                [0, focal_length, 0],
167                [0, 0, 1],
168            ],
169            dtype=np.float32,
170        )
def project_np_points_from_3d_to_2d(self, np_3d_points: numpy.ndarray) -> numpy.ndarray:
189    def project_np_points_from_3d_to_2d(self, np_3d_points: np.ndarray) -> np.ndarray:
190        camera_2d_points, _ = cv.projectPoints(
191            np_3d_points,
192            self.rotation_vec,
193            self.translation_vec,
194            self.intrinsic_mat,
195            np.zeros(5),
196        )
197        return camera_2d_points.reshape(-1, 2)
class CameraPointProjector(PointProjector):
    """Projects 2D source points by lifting them to 3D and rendering them
    through a camera model.

    The lifting is delegated to a Point2dTo3dStrategy; the projection back
    to 2D is delegated to a CameraModel built from the given config.
    """

    def __init__(
        self,
        point_2d_to_3d_strategy: Point2dTo3dStrategy,
        camera_model_config: CameraModelConfig,
    ):
        self.point_2d_to_3d_strategy = point_2d_to_3d_strategy
        self.camera_model = CameraModel(camera_model_config)

    def project_points(self, src_points: Union[PointList, PointTuple, Iterable[Point]]):
        """Project a batch of 2D points; returns a PointTuple."""
        lifted_3d = self.point_2d_to_3d_strategy.generate_np_3d_points(PointTuple(src_points))
        projected_2d = self.camera_model.project_np_points_from_3d_to_2d(lifted_3d)
        return PointTuple.from_np_array(projected_2d)

    def project_point(self, src_point: Point):
        """Project a single point via the batch code path."""
        batch = self.project_points(PointTuple.from_point(src_point))
        return batch[0]
class DistortionStateCameraOperation(DistortionStateImageGridBased[_T_CONFIG]):
    """Shared initialization logic for the camera-based distortion states."""

    @classmethod
    def complete_camera_model_config(
        cls,
        height: int,
        width: int,
        camera_model_config: CameraModelConfig,
    ):
        """Fill in missing optional camera-model fields from the image shape.

        Returns the config unchanged when principal_point, focal_length and
        camera_distance are all set; otherwise returns a completed copy.
        """
        config = camera_model_config
        if config.principal_point and config.focal_length and config.camera_distance:
            # Nothing to fill in.
            return config

        # attrs.evolve produces a copy, so the caller's config is untouched.
        config = attrs.evolve(config)

        if not config.principal_point:
            # NOTE(review): defaults to [height // 2, width // 2]; elsewhere in
            # this module image coordinates read as (x, y) with x along the
            # width axis — confirm this ordering is intentional.
            config.principal_point = [height // 2, width // 2]

        if not config.focal_length or not config.camera_distance:
            # NOTE(review): when exactly one of the two is missing, both are
            # overwritten with max(height, width) — presumably to keep them
            # consistent with each other; confirm.
            config.focal_length = max(height, width)
            config.camera_distance = config.focal_length

        return config

    def initialize_camera_operation(
        self,
        height: int,
        width: int,
        grid_size: int,
        point_2d_to_3d_strategy: Point2dTo3dStrategy,
        camera_model_config: CameraModelConfig,
    ):
        """Create the source image grid and point projector, then delegate to
        initialize_image_grid_based."""
        src_image_grid = create_src_image_grid(height, width, grid_size)

        completed_config = self.complete_camera_model_config(
            height,
            width,
            camera_model_config,
        )
        point_projector = CameraPointProjector(
            point_2d_to_3d_strategy,
            completed_config,
        )

        self.initialize_image_grid_based(src_image_grid, point_projector)
@attrs.define
class CameraPlaneOnlyConfig(DistortionConfig):
    """Config for the plane-only camera distortion: the image stays flat on
    the z = 0 plane and only the camera pose distorts it."""

    camera_model_config: CameraModelConfig
    grid_size: int
class CameraPlaneOnlyPoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lifts 2D points to 3D by placing every point on the z = 0 plane."""

    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
        np_2d_points = points.to_smooth_np_array()
        num_points = np_2d_points.shape[0]
        # Append a zero z-column to the (N, 2) array -> (N, 3).
        zero_zs = np.zeros((num_points, 1), dtype=np.float32)
        return np.hstack((np_2d_points, zero_zs))
class CameraPlaneOnlyState(DistortionStateCameraOperation[CameraPlaneOnlyConfig]):
    """State for the plane-only camera distortion."""

    @classmethod
    def weights_func(cls, norm_distances: np.ndarray, alpha: float):
        # Hyperbolic falloff: 1 at distance 0, decaying with distance.
        # NOTE(review): not referenced by CameraPlaneOnlyPoint2dTo3dStrategy;
        # possibly kept for symmetry with the other states — confirm.
        return alpha / (norm_distances + alpha)

    def __init__(
        self,
        config: CameraPlaneOnlyConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraPlaneOnlyPoint2dTo3dStrategy()
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
@attrs.define
class CameraCubicCurveConfig(DistortionConfig):
    """Config for the cubic-curve camera distortion: the image surface is bent
    along a cubic height profile before projection."""

    curve_alpha: float
    curve_beta: float
    # Clockwise, [0, 180]
    curve_direction: float
    curve_scale: float
    camera_model_config: CameraModelConfig
    grid_size: int
class CameraCubicCurvePoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lifts 2D points to 3D with a cubic height profile.

    Points are projected onto a direction axis, normalized to [0, 1] over the
    image extent, and a cubic polynomial (with endpoint slopes curve_alpha and
    curve_beta) supplies the z displacement.
    """

    def __init__(
        self,
        height: int,
        width: int,
        curve_alpha: float,
        curve_beta: float,
        curve_direction: float,
        curve_scale: float,
    ):
        # Plane extent.
        self.height = height
        self.width = width

        # Endpoint slopes, clipped to +/-80 degrees to avoid near-vertical
        # tangents before taking the tangent.
        self.curve_alpha = math.tan(np.clip(curve_alpha, -80, 80) / 180 * np.pi)
        self.curve_beta = math.tan(np.clip(curve_beta, -80, 80) / 180 * np.pi)

        # Projection direction in radians, folded into [0, pi).
        self.curve_direction = (curve_direction % 180) / 180 * np.pi

        cos_val = math.cos(self.curve_direction)
        sin_val = math.sin(self.curve_direction)
        # 2D rotation aligning the projection direction with the x axis.
        self.rotation_mat = np.asarray(
            [
                [cos_val, sin_val],
                [-sin_val, cos_val],
            ],
            dtype=np.float32,
        )

        # Rotate the four image corners to find the extent of the projection
        # along the direction axis.
        corners = np.asarray(
            [
                [0, 0],
                [self.width - 1, 0],
                [self.width - 1, self.height - 1],
                [0, self.height - 1],
            ],
            dtype=np.float32,
        )
        rotated_xs = np.matmul(self.rotation_mat, corners.transpose())[0]
        self.plane_projection_min = rotated_xs.min()
        self.plane_projection_range = rotated_xs.max() - self.plane_projection_min

        self.curve_scale = curve_scale

    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
        np_2d_points = points.to_smooth_np_array()

        # Scalar projection of every point onto the curve direction,
        # normalized into [0, 1] over the image extent.
        rotated = np.matmul(self.rotation_mat, np_2d_points.transpose())
        rotated_xs = rotated[0]
        ratios = (rotated_xs - self.plane_projection_min) / self.plane_projection_range

        # Cubic polynomial passing through (0, 0) and (1, 0) with slopes
        # curve_alpha at 0 and curve_beta at 1.
        poly = np.asarray([
            self.curve_alpha + self.curve_beta,
            -2 * self.curve_alpha - self.curve_beta,
            self.curve_alpha,
            0,
        ])
        pos_zs = np.polyval(poly, ratios)
        pos_zs = pos_zs * self.plane_projection_range * self.curve_scale
        # Center the displacement so its mean is zero.
        pos_zs = pos_zs - pos_zs.mean()

        return np.hstack((np_2d_points, pos_zs.reshape((-1, 1))))
class CameraCubicCurveState(DistortionStateCameraOperation[CameraCubicCurveConfig]):
    """State for the cubic-curve camera distortion."""

    def __init__(
        self,
        config: CameraCubicCurveConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraCubicCurvePoint2dTo3dStrategy(
            height,
            width,
            config.curve_alpha,
            config.curve_beta,
            config.curve_direction,
            config.curve_scale,
        )
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
class CameraPlaneLinePoint2dTo3dStrategy(Point2dTo3dStrategy):
    """Lifts 2D points to 3D by perturbing them around a reference line.

    Each point's distance to the line (normalized by the image diagonal) is
    fed through weights_func; the resulting weight scales perturb_vec, which
    is then added to the point (with the mean shift removed).
    """

    def __init__(
        self,
        height: int,
        width: int,
        point: Tuple[float, float],
        direction: float,
        perturb_vec: Tuple[float, float, float],
        alpha: float,
        weights_func: Callable[[np.ndarray, float], np.ndarray],
    ):
        # Plane extent.
        self.height = height
        self.width = width

        # Line through `point` with the given clockwise direction, stored as
        # coefficients (a, b, c) of a*x + b*y + c = 0.
        self.point = np.asarray(point, dtype=np.float32)
        direction = (direction % 180) / 180 * np.pi
        cos_theta = np.cos(direction)
        sin_theta = np.sin(direction)
        self.line_params_a_b = np.asarray([sin_theta, -cos_theta], dtype=np.float32)
        self.line_param_c = -self.point[0] * sin_theta + self.point[1] * cos_theta

        # For weight calculation: normalize distances by the image diagonal.
        self.distance_max = np.sqrt(height**2 + width**2)
        self.alpha = alpha
        self.weights_func = weights_func

        # Deformation vector applied per point, scaled by its weight.
        self.perturb_vec = np.asarray(perturb_vec, dtype=np.float32)

    def generate_np_3d_points(self, points: PointTuple) -> np.ndarray:
        np_2d_points = points.to_smooth_np_array()

        # Distance of each point to the line, normalized and weighted.
        distances = np.abs((np_2d_points * self.line_params_a_b).sum(axis=1) + self.line_param_c)
        norm_distances = distances / self.distance_max
        weights = self.weights_func(norm_distances, self.alpha)

        # Lift to z = 0, then add the weighted perturbation vector.
        zero_zs = np.zeros((np_2d_points.shape[0], 1), dtype=np.float32)
        np_3d_points = np.hstack((np_2d_points, zero_zs))
        np_perturb = weights.reshape(-1, 1) * self.perturb_vec
        # Center the perturbation so its mean is zero.
        np_perturb -= np_perturb.mean(axis=0)
        np_3d_points += np_perturb
        return np_3d_points
@attrs.define
class CameraPlaneLineFoldConfig(DistortionConfig):
    """Config for the line-fold camera distortion: the image is creased along
    a line and perturbed with a hyperbolic falloff."""

    fold_point: Tuple[float, float]
    # Clockwise, [0, 180]
    fold_direction: float
    fold_perturb_vec: Tuple[float, float, float]
    fold_alpha: float
    camera_model_config: CameraModelConfig
    grid_size: int
class CameraPlaneLineFoldState(DistortionStateCameraOperation[CameraPlaneLineFoldConfig]):
    """State for the line-fold camera distortion."""

    @classmethod
    def weights_func(cls, norm_distances: np.ndarray, alpha: float):
        # Hyperbolic falloff: 1 on the fold line, decaying with distance.
        return alpha / (norm_distances + alpha)

    def __init__(
        self,
        config: CameraPlaneLineFoldConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraPlaneLinePoint2dTo3dStrategy(
            height=height,
            width=width,
            point=config.fold_point,
            direction=config.fold_direction,
            perturb_vec=config.fold_perturb_vec,
            alpha=config.fold_alpha,
            weights_func=self.weights_func,
        )
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )
@attrs.define
class CameraPlaneLineCurveConfig(DistortionConfig):
    """Config for the line-curve camera distortion: the image bends smoothly
    around a line with a polynomial falloff."""

    curve_point: Tuple[float, float]
    # Clockwise, [0, 180]
    curve_direction: float
    curve_perturb_vec: Tuple[float, float, float]
    curve_alpha: float
    camera_model_config: CameraModelConfig
    grid_size: int
class CameraPlaneLineCurveState(DistortionStateCameraOperation[CameraPlaneLineCurveConfig]):
    """State for the line-curve camera distortion."""

    @classmethod
    def weights_func(cls, norm_distances: np.ndarray, alpha: float):
        # Polynomial falloff: 1 on the curve line, 0 at the image diagonal.
        return 1 - norm_distances**alpha

    def __init__(
        self,
        config: CameraPlaneLineCurveConfig,
        shape: Tuple[int, int],
        rng: Optional[RandomGenerator],
    ):
        height, width = shape
        strategy = CameraPlaneLinePoint2dTo3dStrategy(
            height=height,
            width=width,
            point=config.curve_point,
            direction=config.curve_direction,
            perturb_vec=config.curve_perturb_vec,
            alpha=config.curve_alpha,
            weights_func=self.weights_func,
        )
        self.initialize_camera_operation(
            height,
            width,
            config.grid_size,
            strategy,
            config.camera_model_config,
        )