Merge pull request #108 from lf-lang/working
Resurrection of the YOLO example
Showing 5 changed files with 204 additions and 132 deletions.
README.md
@@ -1,13 +1,22 @@
-To run the example(s):
+# YOLO
+
+This collection of examples shows how to process video data in Python and how to invoke a DNN-based object-recognition algorithm on the video frames.
+
+# Setup
 
 First, go to the PyTorch website and follow the instructions to install PyTorch: https://pytorch.org/get-started/locally/
 
 IMPORTANT: If running with an NVidia GPU, select the correct CUDA version on the installation page.
 
-Then, install other libraries and compile the LF file:
+Then, install other libraries:
 
 python3 -m pip install -r requirements.txt
-lfc YOLOv5_Webcam.lf # (or lfc YOLOv5_Webcam_Timer.lf)
 
-Follow the instructions printed by `lfc` to run the program.
+Compile the programs with `lfc`.
+
+# Examples
+
+* [Video.lf](Video.lf): Simple video capture and display. Here, the timing of capturing frames is controlled by a Lingua Franca timer whose period is a parameter of the `WebCam` reactor.
+* [VideoAsync.lf](VideoAsync.lf): This is similar except that the frame rate is set on the camera and the `WebCamAsync` reactor blocks on input video frames. This puts the camera in charge of the timing of program execution.
+* [YOLOv5_Webcam.lf](YOLOv5_Webcam.lf): This example analyzes each video frame using a pre-trained object-recognition DNN and displays an annotated image (see the sketch after this list). This version uses the `WebCamAsync` reactor from `VideoAsync.lf`.
+* [YOLOv5_Webcam_Timer.lf](YOLOv5_Webcam_Timer.lf): This example is similar but uses `WebCam` from `Video.lf`, so its timing is driven by a timer rather than by the camera.
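The YOLOv5 programs themselves are not shown in this diff, so the following is only a minimal standalone sketch of the recognition step, assuming the standard Ultralytics `torch.hub` entry point (`ultralytics/yolov5`, model `yolov5s`); it is not the repository's `YOLOv5_Webcam.lf` code.

```python
# Hypothetical sketch (assumed API, not from this repository): detect objects in one
# webcam frame with a pretrained YOLOv5 model loaded through torch.hub.
import cv2
import torch

model = torch.hub.load("ultralytics/yolov5", "yolov5s")  # downloads weights on first use

stream = cv2.VideoCapture(0, cv2.CAP_ANY)
ret, frame = stream.read()  # one BGR frame, as in the WebCam reactors
if ret:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # YOLOv5 expects RGB input
    results = model(rgb)                          # run the DNN on the frame
    annotated = results.render()[0]               # image with boxes and labels drawn
    cv2.imshow("annotated", cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))
    cv2.waitKey(0)
stream.release()
cv2.destroyAllWindows()
```

In the Lingua Franca programs, the analogous inference step presumably runs in a reaction body between the camera reactor (`WebCam` or `WebCamAsync`) and the `Display` reactor.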
Video.lf (new file)
@@ -0,0 +1,77 @@
/** Video capture and playback example using OpenCV. Please see README.md for instructions. */
target Python {
  single-threaded: true  # OpenCV crashes if we use the multithreaded version.
}

preamble {=
  import sys  # Used below to report errors on stderr.
  import cv2
=}

/**
 * Produce a sequence of frames with the specified offset and period.
 * @param webcam_id The ID of the camera (default 0).
 * @param offset Time until frames start to be captured.
 * @param period The period with which frames will be read.
 */
reactor WebCam(webcam_id=0, offset = 0 s, period = 100 ms) {
  output camera_frame

  state stream
  timer camera_tick(offset, period)

  reaction(startup) {=
    self.stream = cv2.VideoCapture(self.webcam_id, cv2.CAP_ANY)  # or CAP_DSHOW
    if not self.stream.isOpened():
      sys.stderr.write("Error: Failed to capture from the webcam.\n")
      exit(1)

    # Here, LF is in charge of the timing, so do not set the frame rate.
    # self.stream.set(cv2.CAP_PROP_FPS, 30)  # Set the camera's FPS to 30.
  =}

  reaction(camera_tick) -> camera_frame {=
    # read() is a combination of grab() and retrieve().
    ret, frame = self.stream.read()
    if ret:
      camera_frame.set(frame)
    else:
      print("WARNING: Camera frame missing.")
  =}

  reaction(shutdown) {=
    self.stream.release()
  =}
}

/** Display video frames. */
reactor Display {
  input frame
  state frame_count = 0

  reaction(startup) {=
    print("\n******* Press 'q' in the video window to exit *******\n")
  =}

  reaction(frame) {=
    self.frame_count += 1
    # Every hundred frames, report the average frame rate since startup.
    # SEC(1) and lf.time.physical_elapsed() are both in nanoseconds, so the ratio is frames per second.
    if self.frame_count % 100 == 0:
      print(f"** Average frame rate: {self.frame_count * SEC(1) / lf.time.physical_elapsed()} f/s")

    cv2.imshow("frame", frame.value)
    # Press 'q' to exit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
      request_stop()
  =}

  reaction(shutdown) {=
    # Destroy all OpenCV windows.
    cv2.destroyAllWindows()
  =}
}

main reactor {
  webcam = new WebCam()
  display = new Display()
  webcam.camera_frame -> display.frame
}
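To make the timing contrast with the next example concrete, here is a plain-Python sketch (an illustration under assumptions, not code generated by `lfc`) of the timer-driven pacing that `WebCam` expresses with its 100 ms LF timer: the program, not the camera, decides when each frame is read.

```python
# Illustrative sketch only: timer-driven capture without Lingua Franca.
import time
import cv2

PERIOD_S = 0.1  # corresponds to the WebCam parameter period = 100 ms

stream = cv2.VideoCapture(0, cv2.CAP_ANY)
next_tick = time.monotonic()

while True:
    next_tick += PERIOD_S
    ret, frame = stream.read()  # grab + retrieve one frame per tick
    if ret:
        cv2.imshow("frame", frame)
    else:
        print("WARNING: Camera frame missing.")
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    # Sleep until the next tick: the program, not the camera, sets the pace.
    time.sleep(max(0.0, next_tick - time.monotonic()))

stream.release()
cv2.destroyAllWindows()
```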
VideoAsync.lf (new file)
@@ -0,0 +1,68 @@
/**
 * Video capture and playback example using OpenCV with the camera driving the timing. Please see
 * README.md for instructions.
 */
target Python {
  keepalive: true,
  single-threaded: true  # OpenCV crashes if we use the multithreaded version.
}

import Display from "Video.lf"

preamble {=
  import sys  # Used below to report errors on stderr.
  import cv2
=}

/**
 * Produce a sequence of frames as they are delivered by the camera. This version uses blocking
 * reads to read a video frame and starts the next read shortly after completing the previous one.
 * It should only be used in programs where the camera frames drive everything, because
 * WebCamAsync blocks until it gets a camera frame.
 *
 * @param webcam_id The ID of the camera (default 0).
 * @param offset Time until frames start to be captured.
 * @param frames_per_second The number of frames per second to set the camera to.
 */
reactor WebCamAsync(webcam_id=0, offset = 0 s, frames_per_second=30) {
  input trigger
  output camera_frame

  timer start(offset)
  state stream

  reaction(start) -> camera_frame {=
    self.stream = cv2.VideoCapture(self.webcam_id, cv2.CAP_ANY)
    if not self.stream.isOpened():
      sys.stderr.write("Error: Failed to open the camera.\n")
      exit(1)

    self.stream.set(cv2.CAP_PROP_FPS, self.frames_per_second)

    # Read the first frame. This is a blocking read.
    ret, frame = self.stream.read()
    if ret:
      camera_frame.set(frame)
    else:
      print("Warning: failed to get the first frame.")
  =}

  reaction(trigger) -> camera_frame {=
    # Read a frame. This is a blocking read.
    ret, frame = self.stream.read()
    if ret:
      camera_frame.set(frame)
    else:
      print("Warning: failed to get a frame.")
  =}

  reaction(shutdown) {=
    self.stream.release()
  =}
}

main reactor {
  webcam = new WebCamAsync()
  display = new Display()
  webcam.camera_frame -> display.frame
  # Physical connection: each delivered frame becomes the trigger for the next blocking read.
  webcam.camera_frame ~> webcam.trigger
}
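For comparison with the timer-driven sketch after Video.lf, here is a plain-Python sketch (again an assumption for illustration, not code generated by `lfc`) of the camera-driven pacing that `WebCamAsync` and the physical connection `~>` express: each blocking `read()` completes only when the camera delivers a frame, so the camera's frame rate paces the loop.

```python
# Illustrative sketch only: camera-driven capture without Lingua Franca.
import cv2

stream = cv2.VideoCapture(0, cv2.CAP_ANY)
stream.set(cv2.CAP_PROP_FPS, 30)  # corresponds to frames_per_second = 30

while True:
    ret, frame = stream.read()  # blocking read; the camera paces this loop
    if not ret:
        print("Warning: failed to get a frame.")
        continue
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

stream.release()
cv2.destroyAllWindows()
```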