From 5c7b4c5d98e5bcae7b9e124fece4e7703d60a98f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Agust=C3=ADn=20Castro?=
Date: Fri, 1 Mar 2024 12:40:37 -0300
Subject: [PATCH] Update multi camera documentation

---
 demos/multi_camera/README.md   | 17 ++++++++++-------
 norfair/common_reference_ui.py | 31 ++++++++++++++++++++-----------
 norfair/multi_camera.py        | 13 ++++++++++++-
 3 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/demos/multi_camera/README.md b/demos/multi_camera/README.md
index 0b534390..883728cd 100644
--- a/demos/multi_camera/README.md
+++ b/demos/multi_camera/README.md
@@ -15,7 +15,7 @@ This method will allow you to associate trackers from different footage of the s
 python3 demo.py video1.mp4 video2.mp4 video3.mp4
 ```
 
-A UI will appear to associate points in `video1.mp4` with points in the other videos, to set `video1.mp4` as a common frame of reference. You can save the transformation you have created in the UI by using the `--save-transformation` flag, and you can load it later with the `--load-transformation` flag.
+A UI will appear to associate points in `video1.mp4` with points in the other videos, to set `video1.mp4` as a common frame of reference.
 
 If the cameras move in the videos, you should also use the `--use-motion-estimator-footage` flag to consider camera movement.
 
@@ -27,7 +27,7 @@ This method will allow you to associate trackers from different footage of the s
 python3 demo.py video1.mp4 video2.mp4 video3.mp4 --reference path_to_reference_file
 ```
 
-As before, you will have to use the UI, or if you have already done that and saved the transformation with the `--save-transformation` flag, you can load that same transformation with the `--load-transformation` flag.
+As before, you will have to use the UI.
 
 If the videos in which you are tracking have camera movement, you should also use the `--use-motion-estimator-footage` flag to consider camera movement in those videos.
 
@@ -37,18 +37,17 @@ If you are using a video for the reference file, and the camera moves in the ref
 For additional settings, you may display the instructions using `python demo.py --help`.
 
-
-
-
 ## UI usage
 
-The UI has the puropose of annotating points that match in the reference and the footage, to estimate a transformation.
+The UI has the purpose of annotating points that match in the reference and the footage (either images or videos), to estimate a transformation.
 
 To add a point, just click a pair of points (one from the footage window, and another from the reference window) and select `"Add"`.
 To remove a point, just select the corresponding point at the bottom left corner, and select `"Remove"`.
 You can also ignore points, by clicking them and selecting `"Ignore"`. The transformation will not use ignored points.
 To 'unignore' points that have been previously ignored, just click them and select `"Unignore"`.
 
+To resize the footage or the reference image, you can use the `"+"` and `"-"` buttons in the `'Resize footage'` and `'Resize reference'` sections of the Menu.
+
 If either footage or reference are videos, you can jump to future frames to pick points that match.
 For example, to jump 215 frames in the footage, just write that number next to `'Frames to skip (footage)'`, and select `"Skip frames"`.
@@ -58,4 +57,8 @@
 Once a transformation has been estimated (you will know because the `"Finished"` button turns green), you can test it:
 To test your transformation, select the `"Test"` mode, and pick a point in either the reference or the footage, and see the associated point in the other window.
 You can go back to the `"Annotate"` mode and keep adding more associated points until you are satisfied with the estimated transformation.
-Once you are happy with the transformation, just click on `"Finish"`.
\ No newline at end of file
+You can also save the state (the points and the transformation you have) to a `.pkl` file using the `"Save"` button, so that you can later load that state from the UI with the `"Load"` button.
+
+You can swap the reference points with the footage points (inverting the transformation) with the `"Invert"` button. This is particularly useful if you have previously saved a state in which the reference was the current footage, and the footage was the current reference.
+
+Once you are happy with the transformation, just click on `"Finished"`.
\ No newline at end of file
diff --git a/norfair/common_reference_ui.py b/norfair/common_reference_ui.py
index d0dbbebd..ce3d8a53 100644
--- a/norfair/common_reference_ui.py
+++ b/norfair/common_reference_ui.py
@@ -38,22 +38,31 @@ def set_reference(
     UI usage:
 
-    Creates a UI to annotate points that match in reference and footage, and estimate the transformation.
-    To add a point, just click a pair of points (one from the footage window, and another from the reference window) and select "Add".
-    To remove a point, just select the corresponding point at the bottom left corner, and select "Remove".
-    You can also ignore points, by clicking them and selecting "Ignore". The transformation will not used ingored points.
-    To 'uningnore' points that have been previously ignored, just click them and select "Unignore".
+    The UI has the purpose of annotating points that match in the reference and the footage (either images or videos), to estimate a transformation.
+
+    To add a point, just click a pair of points (one from the footage window, and another from the reference window) and select `"Add"`.
+    To remove a point, just select the corresponding point at the bottom left corner, and select `"Remove"`.
+    You can also ignore points, by clicking them and selecting `"Ignore"`. The transformation will not use ignored points.
+    To 'unignore' points that have been previously ignored, just click them and select `"Unignore"`.
+
+    To resize the footage or the reference image, you can use the `"+"` and `"-"` buttons in the `'Resize footage'` and `'Resize reference'` sections of the Menu.
 
     If either footage or reference are videos, you can jump to future frames to pick points that match.
-    For example, to jump 215 frames in the footage, just write an integer number of frames to jump next to 'Frames to skip (footage)', and select "Skip frames".
-    A motion estimator can be used to relate the coordinates of the current frame you see (in either footage or reference) to coordinates in its corresponding first frame.
+    For example, to jump 215 frames in the footage, just write that number next to `'Frames to skip (footage)'`, and select `"Skip frames"`.
+
     You can go back to the first frame of the video (in either footage or reference) by selecting "Reset video".
 
-    Once a transformation has been estimated, you can test it:
-    To Test your transformation, Select the 'Test' mode, and pick a point in either the reference or the footage, and see the associated point in the other window.
-    You can keep adding more associated points until you are satisfied with the estimated transformation.
+    Once a transformation has been estimated (you will know because the `"Finished"` button turns green), you can test it:
+    To test your transformation, select the `"Test"` mode, and pick a point in either the reference or the footage, and see the associated point in the other window.
+    You can go back to the `"Annotate"` mode and keep adding more associated points until you are satisfied with the estimated transformation.
+
+    You can also save the state (the points and the transformation you have) to a `.pkl` file using the `"Save"` button, so that you can later load that state from the UI with the `"Load"` button.
+
+    You can swap the reference points with the footage points (inverting the transformation) with the `"Invert"` button. This is particularly useful if you have previously saved a state in which the reference was the current footage, and the footage was the current reference.
+
+    Once you are happy with the transformation, just click on `"Finished"`.
 
-    Argument:
+    Arguments:
 
     - reference: str
         Path to the reference image or video
diff --git a/norfair/multi_camera.py b/norfair/multi_camera.py
index ebc7e956..f5b2d4e8 100644
--- a/norfair/multi_camera.py
+++ b/norfair/multi_camera.py
@@ -42,6 +42,7 @@ def __init__(self, tracked_object=None, id=None):
     Class that relates trackers from different videos
 
     Attributes:
+    - Cluster.id: number identifying the cluster
    - Cluster.tracked_objects: dict of the form {str: TrackedObject}
       where str will indicate the name of the camera/video
    - Cluster.tracked_ids: list of tuples of the form (str, int)
@@ -90,7 +91,9 @@ def __init__(
         How far two clusters (groups of trackers) need to be to not join them.
 
     - join_distance_by: str.
-        String indicating how we 'merge' distance between trackers to construct a distance between clusters.
+        String indicating how we combine the distances between trackers to construct a distance between clusters.
+        Each cluster will have several TrackedObject instances, so we can either take the maximum
+        or the average of the distances between their TrackedObject instances.
         Can be either 'max' or 'mean'.
 
     - max_votes_grow: int.
@@ -104,6 +107,14 @@ def __init__(
     - memory: int.
         Merge the information of the current update with past updates to generate clusters and vote (to grow, split or neither).
         This parameter indicates how far into the past we should look.
+
+    - initialization_delay: int.
+        When a new cluster is created, we wait a few frames before returning it in the update method, so that new
+        clusters have the chance to be merged with other existing clusters.
+
+    - filter_by_objects_age: bool.
+        When waiting to return a new cluster, we can either consider the age of the cluster itself, or the
+        age of the TrackedObject instances in the cluster.
     """
     if max_votes_grow < 1:
         raise ValueError("max_votes_grow parameter needs to be >= 1")
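
---

Reviewer note: below is a minimal sketch of how the pieces documented in this patch might fit together. It is not the demo's exact code: only the constructor parameters and the `Cluster.id` / `Cluster.tracked_ids` attributes come from the docstrings above. The class name `MultiCameraClusterizer`, the `distance_function` / `distance_threshold` keyword names, and the shape of the `update` call are assumptions about `norfair/multi_camera.py`.

```python
# Hypothetical usage sketch; assumed names are marked in the comments.
import numpy as np

from norfair import Detection, Tracker
from norfair.multi_camera import MultiCameraClusterizer  # class name assumed

def clusterizer_distance(obj1, obj2) -> float:
    # Distance between two TrackedObject estimates, assumed to already live in
    # the common frame of reference produced by the UI (set_reference).
    return float(np.linalg.norm(obj1.estimate - obj2.estimate))

# One tracker per video, as in `python3 demo.py video1.mp4 video2.mp4`.
# initialization_delay=0 so this toy example returns objects on the first frame.
trackers = {
    "video1.mp4": Tracker("euclidean", distance_threshold=100, initialization_delay=0),
    "video2.mp4": Tracker("euclidean", distance_threshold=100, initialization_delay=0),
}

clusterizer = MultiCameraClusterizer(  # first two keyword names are assumed
    distance_function=clusterizer_distance,
    distance_threshold=60,           # how far two clusters need to be to not join them
    join_distance_by="mean",         # combine tracker distances with 'max' or 'mean'
    max_votes_grow=4,                # votes needed before growing a cluster
    memory=3,                        # how far into the past to look when voting
    initialization_delay=2,          # frames to wait before returning a new cluster
    filter_by_objects_age=False,     # wait by cluster age, not TrackedObject age
)

# One synthetic frame per camera: the same physical point seen by both videos,
# already expressed in the common reference frame.
detections = {
    "video1.mp4": [Detection(points=np.array([[300.0, 200.0]]))],
    "video2.mp4": [Detection(points=np.array([[302.0, 198.0]]))],
}
tracked_by_camera = {
    camera: trackers[camera].update(detections=dets)
    for camera, dets in detections.items()
}

# The shape of this update call is an assumption; the attributes printed are
# the ones documented in this patch (Cluster.id, Cluster.tracked_ids).
for cluster in clusterizer.update(tracked_by_camera):
    print(cluster.id, cluster.tracked_ids)
```

In the demo itself, the coordinate alignment would come from the UI described in `common_reference_ui.py` (plus the motion-estimator flags), which is what lets a single distance function compare estimates from different cameras directly.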