diff --git a/demos/camera_motion/src/demo.py b/demos/camera_motion/src/demo.py
index cafef3ca..f0a0c791 100644
--- a/demos/camera_motion/src/demo.py
+++ b/demos/camera_motion/src/demo.py
@@ -142,7 +142,7 @@ def run():
         "--fixed-camera-scale",
         type=float,
         default=0,
-        help="Scale of the fixed camera, set to 0 to disable. Note that this only works for translation",
+        help="Scale of the fixed camera, set to 0 to disable.",
     )
     parser.add_argument(
         "--draw-absolute-grid",
diff --git a/norfair/drawing/fixed_camera.py b/norfair/drawing/fixed_camera.py
index c2e5b1bb..3e74d8bd 100644
--- a/norfair/drawing/fixed_camera.py
+++ b/norfair/drawing/fixed_camera.py
@@ -100,6 +100,7 @@ def adjust_frame(
                 np.array(self._background.shape[:2]) // 2
                 - np.array(frame.shape[:2]) // 2
             )
+            self.top_left = self.top_left[::-1]
         else:
             self._background = (self._background * self._attenuation_factor).astype(
                 frame.dtype
@@ -109,7 +110,7 @@
         # top_left_translation o rel_to_abs
         if isinstance(coord_transformation, HomographyTransformation):
             top_left_translation = np.array(
-                [[1, 0, self.top_left[1]], [0, 1, self.top_left[0]], [0, 0, 1]]
+                [[1, 0, self.top_left[0]], [0, 1, self.top_left[1]], [0, 0, 1]]
             )
             full_transformation = (
                 top_left_translation @ coord_transformation.inverse_homography_matrix
@@ -126,8 +127,8 @@
 
             full_transformation = np.array(
                 [
-                    [1, 0, self.top_left[1] - coord_transformation.movement_vector[0]],
-                    [0, 1, self.top_left[0] - coord_transformation.movement_vector[1]],
+                    [1, 0, self.top_left[0] - coord_transformation.movement_vector[0]],
+                    [0, 1, self.top_left[1] - coord_transformation.movement_vector[1]],
                 ]
             )
             background_with_current_frame = cv2.warpAffine(
diff --git a/norfair/drawing/path.py b/norfair/drawing/path.py
index c9e80410..0b607062 100644
--- a/norfair/drawing/path.py
+++ b/norfair/drawing/path.py
@@ -249,6 +249,7 @@ def draw(self, frame, tracked_objects, coord_transform=None):
                 np.array(self._background.shape[:2]) // 2
                 - np.array(frame.shape[:2]) // 2
             )
+            self.top_left = self.top_left[::-1]
        else:
             self._background = (self._background * self._attenuation_factor).astype(
                 frame.dtype