diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 diff --git a/Q01_footfall_counter/exe/coco-labels-2014_2017.txt b/Q01_footfall_counter/exe_v2h/coco-labels-2014_2017.txt old mode 100644 new mode 100755 similarity index 100% rename from Q01_footfall_counter/exe/coco-labels-2014_2017.txt rename to Q01_footfall_counter/exe_v2h/coco-labels-2014_2017.txt diff --git a/Q01_footfall_counter/exe_v2h/config.ini b/Q01_footfall_counter/exe_v2h/config.ini new file mode 100755 index 0000000..6bc02fb --- /dev/null +++ b/Q01_footfall_counter/exe_v2h/config.ini @@ -0,0 +1,24 @@ +; Configuration File + +[line] +x1=96; +y1=444; +x2=296; +y2=387; +[region] +n=4; +x1=270; +y1=442; +x2=400; +y2=390; +x3=480; +y3=410; +x4=364; +y4=476; +[tracking] +kmin=4; +conf=0.1; +objects=person; +[display] +display_text=Human Count; +region_display_text=People in region; \ No newline at end of file diff --git a/Q01_footfall_counter/exe_v2h/d-yolov3/deploy.json b/Q01_footfall_counter/exe_v2h/d-yolov3/deploy.json new file mode 100755 index 0000000..62b6854 --- /dev/null +++ b/Q01_footfall_counter/exe_v2h/d-yolov3/deploy.json @@ -0,0 +1,76 @@ +{ + "nodes": [ + { + "op": "null", + "name": "input1", + "inputs": [] + }, + { + "op": "tvm_op", + "name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "attrs": { + "flatten_data": "0", + "func_name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "hash": "036f4701453dc3f3", + "num_outputs": "3", + "num_inputs": "1", + "global_symbol": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "Compiler": "mera_drp" + }, + "inputs": [ + [ + 0, + 0, + 0 + ] + ] + } + ], + "arg_nodes": [0], + "heads": [ + [ + 1, + 0, + 0 + ], + [ + 1, + 1, + 0 + ], + [ + 1, + 2, + 0 + ] + ], + "attrs": { + "dltype": [ + "list_str", + [ + "float32", + "float16", + "float16", + "float16" + ] + ], + "device_index": [ + "list_int", + [1, 1, 1, 1] + ], + "storage_id": [ + "list_int", + [0, 1, 2, 3] + ], + "shape": [ + 
"list_shape", + [ + [1, 3, 416, 416], + [1, 255, 13, 13], + [1, 255, 26, 26], + [1, 255, 52, 52] + ] + ] + }, + "node_row_ptr": [0, 1, 4] +} \ No newline at end of file diff --git a/R01_object_detection/exe/yolov3_onnx/deploy.params b/Q01_footfall_counter/exe_v2h/d-yolov3/deploy.params similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/deploy.params rename to Q01_footfall_counter/exe_v2h/d-yolov3/deploy.params diff --git a/Q01_footfall_counter/exe_v2h/object_tracker b/Q01_footfall_counter/exe_v2h/object_tracker new file mode 100755 index 0000000..7361619 Binary files /dev/null and b/Q01_footfall_counter/exe_v2h/object_tracker differ diff --git a/R01_object_detection/exe/coco-labels-2014_2017.txt b/Q01_footfall_counter/exe_v2l/coco-labels-2014_2017.txt old mode 100755 new mode 100644 similarity index 100% rename from R01_object_detection/exe/coco-labels-2014_2017.txt rename to Q01_footfall_counter/exe_v2l/coco-labels-2014_2017.txt diff --git a/Q01_footfall_counter/exe/config.ini b/Q01_footfall_counter/exe_v2l/config.ini similarity index 100% rename from Q01_footfall_counter/exe/config.ini rename to Q01_footfall_counter/exe_v2l/config.ini diff --git a/Q01_footfall_counter/exe/object_tracker b/Q01_footfall_counter/exe_v2l/object_tracker similarity index 100% rename from Q01_footfall_counter/exe/object_tracker rename to Q01_footfall_counter/exe_v2l/object_tracker diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/deploy.json b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/deploy.json similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/deploy.json rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/deploy.json diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/deploy.params b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/deploy.params similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/deploy.params rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/deploy.params diff --git 
a/Q01_footfall_counter/exe/tinyyolov3_onnx/deploy.so b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/deploy.so similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/deploy.so rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/deploy.so diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/aimac_desc.bin b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/aimac_desc.bin similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/aimac_desc.bin rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/aimac_desc.bin diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/drp_desc.bin b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/drp_desc.bin similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/drp_desc.bin rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/drp_desc.bin diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/drp_param.bin b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/drp_param.bin similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/drp_param.bin rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/drp_param.bin diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/drp_param_info.txt b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/drp_param_info.txt similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/drp_param_info.txt rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/drp_param_info.txt diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt diff --git 
a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/pp_drpcfg.mem b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/pp_drpcfg.mem similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/pp_drpcfg.mem rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/pp_drpcfg.mem diff --git a/Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/pp_weight.dat b/Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/pp_weight.dat similarity index 100% rename from Q01_footfall_counter/exe/tinyyolov3_onnx/preprocess/pp_weight.dat rename to Q01_footfall_counter/exe_v2l/tinyyolov3_onnx/preprocess/pp_weight.dat diff --git a/Q01_footfall_counter/images/ObjectTracking.gif b/Q01_footfall_counter/images/ObjectTracking.gif old mode 100644 new mode 100755 diff --git a/Q01_footfall_counter/images/Q01_image_V2H.png b/Q01_footfall_counter/images/Q01_image_V2H.png new file mode 100755 index 0000000..e849d10 Binary files /dev/null and b/Q01_footfall_counter/images/Q01_image_V2H.png differ diff --git a/Q01_footfall_counter/images/hw_conf_v2h.png b/Q01_footfall_counter/images/hw_conf_v2h.png new file mode 100755 index 0000000..7b3d546 Binary files /dev/null and b/Q01_footfall_counter/images/hw_conf_v2h.png differ diff --git a/Q01_footfall_counter/images/hw_conf_v2l.png b/Q01_footfall_counter/images/hw_conf_v2l.png new file mode 100755 index 0000000..0005ea3 Binary files /dev/null and b/Q01_footfall_counter/images/hw_conf_v2l.png differ diff --git a/Q01_footfall_counter/images/obj_trk_out.JPG b/Q01_footfall_counter/images/obj_trk_out.JPG old mode 100644 new mode 100755 diff --git a/Q01_footfall_counter/readme.md b/Q01_footfall_counter/readme.md old mode 100644 new mode 100755 index bc97228..5da0e8f --- a/Q01_footfall_counter/readme.md +++ b/Q01_footfall_counter/readme.md @@ -1,234 +1,409 @@ -# Footfall Counter - -## Application: Overview -This application is designed to track and count the number of people entering a designated boundary line, 
-while simultaneously reducing the count for those exiting the boundary line. - -Additionally, the application -has the ability to measure the time spent by a particular person within a specified region of interest. - -This software could be useful in a variety of settings, such as retail stores, museums, and events, -where managers need to monitor and analyze traffic flow and customer behavior. - -The AI model used for the sample application is [TinyYoloV3](https://arxiv.org/pdf/1804.02767.pdf). - -**NOTE:** This sample application can be used to track different objects, like animal, car, etc. The list of objects that can be tracked are provided in [coco labels txt](./exe/coco-labels-2014_2017.txt) file. - -#### Demo - - - -## Application: Requirements - -#### Hardware Requirements -- RZ/V2L Evaluation Board Kit -- USB camera -- USB Keyboard -- USB Hub -- HDMI monitor with resolution 1280x720 -- micro HDMI to HDMI cable -- SD Card (for file system) ->**Note:** All external devices will be attached to the board and does not require any driver installation (Plug n Play Type) -#### Software Requirements -- Ubuntu 20.04 -- OpenCV 4.x -- C++11 or higher -- git 2.41 (or above) -- [Boost C++ libraries](https://boostorg.jfrog.io/artifactory/main/release/1.81.0/source) -- [Eigen linear algebra library](https://eigen.tuxfamily.org/index.php?title=Main_Page) - - -## Application: Build Stage - ->**Note:** User can skip to the next stage (deploy) if they don't want to build the application. All pre-built binaries are provided. - -**Note:** This project expects the user to have completed [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started) provided by Renesas. - -After completion of the guide, the user is expected of following things. -- The Board Set Up and booted. -- SD Card Prepared -- The docker image amd container for `rzv2l_ai_sdk_image` running on host machine. 
- ->**Note:** Docker container is required for building the sample application. By default the Renesas will provide the container named as `rzv2l_ai_sdk_container`. Please use the docker container name as assigned by the user when building the container. - -#### Application File Generation -1. Copy the repository from the GitHub to the desired location. - 1. It is recommended to copy/clone the repository on the `data` folder which is mounted on the `rzv2l_ai_sdk_container` docker container. - ```sh - cd - git clone https://github.com/renesas-rz/rzv_ai_sdk.git - ``` - > Note 1: Please verify the git repository url if error occurs - - > Note 2: This command will download whole repository, which include all other applications, if you have already downloaded the repository of the same version, you may not need to run this command. - -2. Run(or start) the docker container and open the bash terminal on the container. - -> Note: All the build steps/commands listed below are executed on the docker container bash terminal. - -3. Assign path to the `data` directory mounted on the `rzv2l_ai_sdk_container` docker container - -```sh -export PROJECT_PATH=/drp_ai_tvm/data/ -``` -4. Go to the `src` directory of the application - -```sh -cd ${PROJECT_PATH}/rzv_ai_sdk/Q01_footfall_counter/src/ -``` - -5. Download the `boost` tar file -```sh -wget https://boostorg.jfrog.io/artifactory/main/release/1.81.0/source/boost_1_81_0.tar.bz2 -``` ->**Note:** It is expected that the docker container is able to connect to the internet. If that's not the case, User can use the same command on the host PC to download the file. Make sure you are on the `src` folder present on the mounted `data` directory. - -6. Extract tar file to the current location - -```sh -tar -xvf boost_1_81_0.tar.bz2 -``` - -7. Copy the boost files to the `include` folder -```sh -mkdir -p include -cp -r boost_1_81_0/boost include/ -``` - -8. 
Remove boost files [Optional] - -```sh -rm boost_1_81_0.tar.bz2 -rm -rf boost_1_81_0 -``` - -9. Build the application on docker environment by following the steps below - -```sh -mkdir -p build && cd build -``` -```sh -cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake .. -``` -```sh -make -j$(nproc) -``` -The following application file would be generated in the `src/build` directory -- object_tracker - - -## Application: Deploy Stage - -For the ease of deployment all the deployable files and folders for RZ/V2L are provided on the [exe](./exe) folder. - -|File | Details | -|:---|:---| -|tinyyolov3_onnx | Model object files for deployment. | -|coco-labels-2014_2017.txt | Label list for Object Detection. | -|config.ini | user input config for line, region and object. | -|object_tracker | application file. | - - -Follow the steps mentioned below to deploy the project on RZ/V2L Board. -* At the `/home/root/tvm` directory of the rootfs (on SD Card) for RZ/V2L board. - * Copy the files present in [exe](./exe) directory, which are listed in the table above. - * Copy the generated `object_tracker` application file if the application file is built at [build stage](#application-build-stage) - -* Check if libtvm_runtime.so is there on `/usr/lib64` directory of the rootfs (SD card) RZ/V2L board. - - -Folder structure in the rootfs (SD Card) would look like: -```sh -├── usr/ -│ └── lib64/ -│ └── libtvm_runtime.so -└── home/ - └── root/ - └── tvm/ - ├── tinyyolov3_onnx/ - │ ├── preprocess/ - │ ├── deploy.json - │ ├── deploy.params - │ └── deploy.so - ├── coco-labels-2014_2017.txt - ├── config.ini - └── object_tracker - -``` ->**Note:** The directory name could be anything instead of `tvm`. If you copy the whole `exe` folder on the board. You are not required to rename it `tvm`. - -## Application: Run Stage - -* For running the application, run the commands as shown below on the RZ/V2L Evaluation Board console. 
- * Go to the `/home/root/tvm` directory of the rootfs - ```sh - cd /home/root/tvm - ``` - * Change the values in `config.ini` as per the requirements. Detailed explanation of the `config.ini` file is given at below section. - ```sh - vi config.ini - ``` - * Run the application in the terminal of the RZ/V2L evaluation board kit using the command - ```sh - ./object_tracker - ``` -* The expected output will be the same as shown in the demo video - -#### Application: Runtime output details - - -The runtime application will look something like this - - - -- The application will track the person if the person crossed the `red line` and increment the `human count` and decrement it when the person crosses again. - - As per current logic, left to right crossing will increment the human count, and right to left crossing will decrement the count. - - Users need to arrange the line according to the requirement. -- The application will also keep track of the person in the available region `green box` and increment the `person in region` count. It will also decrement the count if the person leaves the area. -- Frame Per Sec (FPS) is shown on top right corner. -- Each person tracked is given a unique `id`. - - The `time` parameter of the tracked person indicates the time spent on the desired location. This incremented at regular interval. - -#### Application: Termination -- Application can be terminated by long pressing `esc` key (around 10 seconds) on the keyboard connected to the board. -- Alternatively, User can force close the application using `CTRL+c` on the board console. - -## Application: Configuration - -###### Explanation of the `config.ini` file - -The file contains three sections: [**line**], [**region**], and [**tracking**]. - ->**Note:** The x,y coordinates are ranged from [0,0] to [img_height, img_width]. The img_height and img_width depends on the camera capture resolution. This sample application is tested on 640x480 image. 
- -- The [**line**] section contains four key-value pairs that define the coordinates of the boundary line to be drawn.\ -The x1, y1, x2, and y2 values correspond to the x and y coordinates of the boundary line's -starting and ending points. - -- The [**region**] section contains five key-value pairs, which defines the Region of Interest.\ -The n value indicates the number of points that define a region, followed by x and y coordinates -for each point.\ -The region is defined by connecting these points in the order they are listed. - -- The [**tracking**] section contains two key-value pairs.\ -The conf value is a confidence threshold used for object tracking, and the kmin value is the minimum number of key-points required for tracking. - ->**Note:** The object tracked here is of class "Person", it can be changed to other classes present on the coco labels. - - -To modify the configuration settings, edit the values in this file using VI Editor, from the RZ/V2L Board. - -###### AI inference time -The AI inference time is 100-120 msec. - -### Time Tracking Backend Integration - ->**Note:** As per recent development status, the application have been tested for 100 numbers of people on the certain region without any error occurring, so if the use cases are expected for the number of people on the certain region to be less than 100, there is no need for code modification. - -- Currently for storing the person id and the time spent on the region of interest [ROI] is stored on the board memory as key-value pair. As board memory consumption is limited, this procedure could be moved to the Database/Cache which could be hosted on the cloud or host machine. - -- Customers can install SQL Database/Cache on the cloud or host-machine. - -- After each sec the application can call the API for inserting the data on the DB, if not present. 
and show the time taken for the persons in the ROI - -- When the person goes out of the ROI, the application can call the API to remove the person data from the DB and cache. +# Footfall Counter + +## Application: Overview +This application is designed to track and count the number of people entering a designated boundary line, +while simultaneously reducing the count for those exiting the boundary line. + +Additionally, the application +has the ability to measure the time spent by a particular person within a specified region of interest. + +This software could be useful in a variety of settings, such as retail stores, museums, and events, +where managers need to monitor and analyze traffic flow and customer behavior. + +The AI model used for the sample application is [YOLOV3/Tiny YOLOv3](https://arxiv.org/pdf/1804.02767.pdf). + +**NOTE:** This sample application can be used to track different objects, like animal, car, etc. The list of objects that can be tracked are provided in [coco labels txt](./exe/coco-labels-2014_2017.txt) file. + +It has following mode of running. + +- Using USB Camera as input + +### Supported Product +- RZ/V2L Evaluation Board Kit (RZ/V2L EVK) +- RZ/V2H Evaluation Board Kit (RZ/V2H EVK) + +### Demo +Following is the demo for RZ/V2L EVK. + + +## Application: Requirements + +### Hardware Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ForEquipmentDetails
RZ/V2LRZ/V2L EVKEvaluation Board Kit for RZ/V2L.
Includes followings. +
    +
  • + MIPI Camera Module(Google Coral Camera) +
  • +
  • MicroUSB to Serial Cable for serial communication.
  • +
+
AC AdapterUSB Power Delivery adapter for the board power supply.
MicroHDMI CableUsed to connect the HDMI Monitor and the board.
+ RZ/V2L EVK has microHDMI port.
RZ/V2HRZ/V2H EVKEvaluation Board Kit for RZ/V2H.
AC AdapterUSB Power Delivery adapter for the board power supply.
+ 100W is required.
HDMI CableUsed to connect the HDMI Monitor and the board.
+ RZ/V2H EVK has HDMI port.
USB CameraUsed as a camera input source.
CommonUSB Cable Type-CConnect AC adapter and the board.
HDMI MonitorUsed to display the graphics of the board.
microSD cardUsed as the filesystem.
+ Must have over 4GB capacity of blank space.
+ Operating Environment: Transcend UHS-I microSD 300S 16GB
Linux PCUsed to build application and setup microSD card.
+ Operating Environment: Ubuntu 20.04
SD card readerUsed for setting up microSD card.
USB HubUsed to connect USB Keyboard and USB Mouse to the board.
USB KeyboardUsed to type strings on the terminal of board.
USB MouseUsed to operate the mouse on the screen of board.
+ +>**Note:** All external devices will be attached to the board and does not require any driver installation (Plug n Play Type) + +Connect the hardware as shown below. + +|RZ/V2L EVK | RZ/V2H EVK | +|:---|:---| +|| | + +>**Note 1:** When using the keyboard connected to RZ/V Evaluation Board, the keyboard layout and language are fixed to English. +**Note 2:** For RZ/V2H EVK, there are USB 2.0 and USB 3.0 ports. +USB camera needs to be connected to appropriate port based on its requirement. + +## Application: Build Stage + +>**Note:** User can skip to the [next stage (deploy)](#application-deploy-stage) if they do not want to build the application. +All pre-built binaries are provided. + +### Prerequisites +This section expects the user to have completed Step 5 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html) provided by Renesas. + +After completion of the guide, the user is expected of following things. +- AI SDK setup is done. +- Following docker container is running on the host machine. + |Board | Docker container | + |:---|:---| + |RZ/V2L EVK|`rzv2l_ai_sdk_container` | + |RZ/V2H EVK|`rzv2h_ai_sdk_container` | + + >**Note:** Docker environment is required for building the sample application. + + +### Application File Generation +1. On your host machine, copy the repository from the GitHub to the desired location. + 1. It is recommended to copy/clone the repository on the `data` folder, which is mounted on the Docker container. + ```sh + cd /data + git clone https://github.com/renesas-rz/rzv_ai_sdk.git + ``` + >Note: This command will download the whole repository, which include all other applications. + If you have already downloaded the repository of the same version, you may not need to run this command. + +2. Run (or start) the docker container and open the bash terminal on the container. +E.g., for RZ/V2L, use the `rzv2l_ai_sdk_container` as the name of container created from `rzv2l_ai_sdk_image` docker image. 
+ > Note that all the build steps/commands listed below are executed on the docker container bash terminal. + +3. Set your clone directory to the environment variable. + ```sh + export PROJECT_PATH=/drp-ai_tvm/data/rzv_ai_sdk + ``` +3. Go to the application source code directory. + ```sh + cd ${PROJECT_PATH}/Q01_footfall_counter/ + ``` + |Board | `SRC_DIR` | + |:---|:---| + |RZ/V2L EVK|`src` | + |RZ/V2H EVK|`src_v2h` | + +4. **[For RZ/V2L only]** Prepare the `boost` library. + 1. Download the `boost` tar file + ```sh + wget https://boostorg.jfrog.io/artifactory/main/release/1.81.0/source/boost_1_81_0.tar.bz2 + ``` + >**Note:** It is expected that the docker container is able to connect to the internet. If that's not the case, User can use the same command on the host PC to download the file. Make sure you are on the `src` folder present on the mounted `data` directory. + + 2. Extract tar file to the current location + + ```sh + tar -xvf boost_1_81_0.tar.bz2 + ``` + + 3. Copy the boost files to the `include` folder + ```sh + mkdir -p include + cp -r boost_1_81_0/boost include/ + ``` + + 4. Remove boost files [Optional] + + ```sh + rm boost_1_81_0.tar.bz2 + rm -rf boost_1_81_0 + ``` + +4. Create and move to the `build` directory. + ```sh + mkdir -p build && cd build + `````` +5. Build the application by following the commands below. + ```sh + cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake .. + make -j$(nproc) + ``` +6. The following application file would be generated in the `${PROJECT_PATH}/Q01_footfall_counter//build` directory + - object_tracker + + +## Application: Deploy Stage +### Prerequisites +This section expects the user to have completed Step 7-1 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html#step7) provided by Renesas. + +After completion of the guide, the user is expected of following things. +- microSD card setup is done. 
+ +### File Configuration +For ease of deployment, all deployable files and folders are provided in the following folders. +|Board | `EXE_DIR` | +|:---|:---| +|RZ/V2L EVK|[exe_v2l](./exe_v2l) | +|RZ/V2H EVK|[exe_v2h](./exe_v2h) | + +Each folder contains following items. + +|File | Details | +|:---|:---| +|tinyyolov3_onnx | **[RZ/V2L only]** Model object files for deployment. | +|d-yolov3 | **[RZ/V2H only]** Model object files for deployment. | +|coco-labels-2014_2017.txt | Label list for Object Detection. | +|config.ini | user input config for line, region and object. | +|object_tracker | application file. | + + + +### Instruction +1. **[For RZ/V2H only]** Run following commands to download the necessary file. + ```sh + cd /data/Q01_footfall_counter/exe_v2h/d-yolov3 + wget https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v3.00/Q01_footfall_counter_deploy_tvm_v2h-v210.so + ``` +2. **[For RZ/V2H only]** Rename the `Q01_footfall_counter_deploy_*.so` to `deploy.so`. + ```sh + mv Q01_footfall_counter_deploy_tvm_v2h-v210.so deploy.so + ``` +3. Copy the following files to the `/home/root/tvm` directory of the rootfs (SD Card) for the board. + |File | Details | + |:---|:---| + |All files in `EXE_DIR` directory | Including `deploy.so` file. | + |`object_tracker` application file | Generated the file according to [Application File Generation](#application-file-generation) | + +4. Check if `libtvm_runtime.so` exists under `/usr/lib64` directory of the rootfs (SD card) on the board. + +5. 
Folder structure in the rootfs (SD Card) would look like: + ```sh + ├── usr/ + │ └── lib64/ + │ └── libtvm_runtime.so + └── home/ + └── root/ + └── tvm/ + ├── tinyyolov3_onnx/ #RZ/V2L only + │ ├── preprocess/ #RZ/V2L only + │ ├── deploy.json #RZ/V2L only + │ ├── deploy.params #RZ/V2L only + │ └── deploy.so #RZ/V2L only + │ + ├── d-yolov3/ #RZ/V2H only + │ ├── deploy.json #RZ/V2H only + │ ├── deploy.params #RZ/V2H only + │ └── deploy.so #RZ/V2H only + │ + ├── coco-labels-2014_2017.txt + ├── config.ini + └── object_tracker + ``` + +>**Note:** The directory name could be anything instead of `tvm`. If you copy the whole `EXE_DIR` folder on the board, you are not required to rename it `tvm`. + +## Application: Run Stage + +### Prerequisites +This section expects the user to have completed Step 7-3 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html#step7-3) provided by Renesas. + +After completion of the guide, the user is expected of following things. +- The board setup is done. +- The board is booted with microSD card, which contains the application file. + +### Instruction +1. On Board terminal, go to the `tvm` directory of the rootfs. + ```sh + cd /home/root/tvm/ + ``` +2. Change the values in `config.ini` as per the requirements. Detailed explanation of the `config.ini` file is given at [below section](#explanation-of-the-configini-file). + ```sh + vi config.ini + ``` +3. Run the application. + ```sh + ./object_tracker USB + ``` +4. Following window shows up on HDMI screen. + + |RZ/V2L EVK | RZ/V2H EVK | + |:---|:---| + || | + + + + - The application will track the person if the person crossed the `red line` and increment the `human count` and decrement it when the person crosses again. + - As per current logic, left to right crossing will increment the human count, and right to left crossing will decrement the count. + - Users need to arrange the line according to the requirement. 
+ - The application will also keep track of the person in the available region `green box` and increment the `person in region` count. It will also decrement the count if the person leaves the area. + - Each person tracked is given a unique `id`. + - The `time` parameter of the tracked person indicates the time spent on the desired location. This incremented at regular interval. + +5. To terninate the application, switch the application window to the terminal by using `Super(windows key)+Tab` and press ENTER key on the terminal of the board. + +## Application: Configuration +### AI Model +#### RZ/V2L EVK +- TinyYOLOv3: [Darknet](https://pjreddie.com/darknet/yolo/) +Dataset: [COCO](https://cocodataset.org/#home) +Input size: 1x3x416x416 +Output1 size: 1x13x13x255 +Output2 size: 1x26x26x255 + +#### RZ/V2H EVK +- YOLOv3: [Darknet](https://pjreddie.com/darknet/yolo/) +Dataset: [COCO](https://cocodataset.org/#home) +Input size: 1x3x416x416 +Output1 size: 1x13x13x255 +Output2 size: 1x26x26x255 +Output3 size: 1x52x52x255 + +### AI inference time + +|Board | AI model | AI inference time| +|:---|:---|:---| +|RZ/V2L EVK|Tiny YOLOv3| Approximately 110ms | +|RZ/V2H EVK |YOLOv3 | Approximately 40ms | + +### Processing + +|Processing | RZ/V2L EVK | RZ/V2H EVK | +|:---|:---|:---| +|Pre-processing | Processed by DRP-AI.
| Processed by CPU.
| +|Inference | Processed by DRP-AI and CPU. | Processed by DRP-AI and CPU. | +|Post-processing | Processed by CPU. |Processed by CPU. | + + +### Image buffer size + +|Board | Camera capture buffer size|HDMI output buffer size| +|:---|:---|:---| +|RZ/V2L EVK| VGA (640x480) in YUYV format | FHD (1920x1080) in BGRA format | +|RZ/V2H EVK | VGA (640x480) in YUYV format | FHD (1920x1080) in BGRA format | + + +### Explanation of the `config.ini` file + +The file contains three sections: [**line**], [**region**], and [**tracking**]. + +>**Note:** The x,y coordinates are ranged from [0,0] to [img_height, img_width]. The img_height and img_width depends on the camera capture resolution. This sample application is tested on 640x480 image. + +- The [**line**] section contains four key-value pairs that define the coordinates of the boundary line to be drawn.\ +The x1, y1, x2, and y2 values correspond to the x and y coordinates of the boundary line's +starting and ending points. + +- The [**region**] section contains five key-value pairs, which defines the Region of Interest.\ +The n value indicates the number of points that define a region, followed by x and y coordinates +for each point.\ +The region is defined by connecting these points in the order they are listed. + +- The [**tracking**] section contains two key-value pairs.\ +The conf value is a confidence threshold used for object tracking, and the kmin value is the minimum number of key-points required for tracking. + +>**Note:** The object tracked here is of class "Person", it can be changed to other classes present on the coco labels. + + +To modify the configuration settings, edit the values in this file using VI Editor, from the Board. 
+ +### Time Tracking Backend Integration + +>**Note:** As per recent development status, the application have been tested for 100 numbers of people on the certain region without any error occurring, so if the use cases are expected for the number of people on the certain region to be less than 100, there is no need for code modification. + +- Currently for storing the person id and the time spent on the region of interest [ROI] is stored on the board memory as key-value pair. As board memory consumption is limited, this procedure could be moved to the Database/Cache which could be hosted on the cloud or host machine. + +- Customers can install SQL Database/Cache on the cloud or host-machine. + +- After each sec the application can call the API for inserting the data on the DB, if not present. and show the time taken for the persons in the ROI + +- When the person goes out of the ROI, the application can call the API to remove the person data from the DB and cache. + +## Reference +- For RZ/V2H EVK, this application supports USB camera only with 640x480 resolution. +FHD resolution is supported by e-CAM22_CURZH camera (MIPI). +Please refer to following URL for how to change camera input to MIPI camera. +[https://renesas-rz.github.io/rzv_ai_sdk/latest/about-applications](https://renesas-rz.github.io/rzv_ai_sdk/latest/about-applications#mipi). 
diff --git a/Q01_footfall_counter/src_v2h/CMakeLists.txt b/Q01_footfall_counter/src_v2h/CMakeLists.txt new file mode 100755 index 0000000..7f50cda --- /dev/null +++ b/Q01_footfall_counter/src_v2h/CMakeLists.txt @@ -0,0 +1,39 @@ +cmake_minimum_required(VERSION 3.10) +project(object_tracker_cpp) + +set(CMAKE_CXX_STANDARD 17) + +set(TVM_ROOT $ENV{TVM_HOME}) +include_directories(${TVM_ROOT}/include) +include_directories(${TVM_ROOT}/3rdparty/dlpack/include) +include_directories(${TVM_ROOT}/3rdparty/dmlc-core/include) +include_directories(${TVM_ROOT}/3rdparty/compiler-rt) + +set(TVM_RUNTIME_LIB ${TVM_ROOT}/build_runtime/libtvm_runtime.so) +set(EXE_NAME object_tracker) + +file(GLOB SOURCE *.cpp *.h) +add_executable (${EXE_NAME} +${SOURCE} +) +TARGET_LINK_LIBRARIES(${EXE_NAME} pthread) +TARGET_LINK_LIBRARIES(${EXE_NAME} jpeg) +TARGET_LINK_LIBRARIES(${EXE_NAME} wayland-client) +TARGET_LINK_LIBRARIES(${EXE_NAME} + pthread glib-2.0 mmngr gobject-2.0 gstreamer-1.0 gstbase-1.0 gstapp-1.0 + opencv_imgcodecs opencv_imgproc opencv_core opencv_highgui + jpeg webp tiff z tbb gtk-3 png16 gdk-3 cairo + lzma rt cairo-gobject + xkbcommon wayland-cursor wayland-egl wayland-client epoxy + fribidi harfbuzz fontconfig + glib-2.0 gobject-2.0 gdk_pixbuf-2.0 gmodule-2.0 pangocairo-1.0 + atk-1.0 gio-2.0 pango-1.0 freetype pixman-1 uuid pcre + mount resolv expat pangoft2-1.0 blkid + EGL GLESv2 mmngrbuf) + +find_package(OpenCV REQUIRED) +if(OpenCV_FOUND) + target_include_directories(${EXE_NAME} PUBLIC ${OpenCV_INCLUDE_DIRS}) + target_link_libraries(${EXE_NAME} ${OpenCV_LIBS}) +endif() +target_link_libraries(${EXE_NAME} ${TVM_RUNTIME_LIB}) diff --git a/Q01_footfall_counter/src_v2h/MeraDrpRuntimeWrapper.cpp b/Q01_footfall_counter/src_v2h/MeraDrpRuntimeWrapper.cpp new file mode 100755 index 0000000..ff005a8 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/MeraDrpRuntimeWrapper.cpp @@ -0,0 +1,208 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 
2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +#include +#include +#include +#include + +#include +#include +#include +#include "MeraDrpRuntimeWrapper.h" + +template +static std::vector LoadBinary(const std::string& bin_file) { + std::ifstream file(bin_file.c_str(), std::ios::in | std::ios::binary); + if (!file.is_open()) { + LOG(FATAL) << "unable to open file " + bin_file; + } + + file.seekg(0, file.end); + const uint32_t file_size = static_cast(file.tellg()); + file.seekg(0, file.beg); + + const auto file_buffer = std::unique_ptr(new char[file_size]); + file.read(file_buffer.get(), file_size); + + if (file.bad() || file.fail()) { + LOG(FATAL) << "error occured while reading the file"; + } + + file.close(); + + auto ptr = reinterpret_cast(file_buffer.get()); + const auto num_elements = file_size / sizeof(T); + return std::vector(ptr, ptr + num_elements); +} + +MeraDrpRuntimeWrapper::MeraDrpRuntimeWrapper() { + //device_type = kDLCPU; + device_type = kDLDrpAi; + device_id = 0; +}; + +MeraDrpRuntimeWrapper::~MeraDrpRuntimeWrapper() = default; + +bool MeraDrpRuntimeWrapper::LoadModel(const std::string& model_dir, uint32_t 
start_address){ + device_type = kDLCPU; + + return LoadModel(model_dir, (uint64_t)start_address); +} + +bool MeraDrpRuntimeWrapper::LoadModel(const std::string& model_dir, uint64_t start_address = 0x00) { + LOG(INFO) << "Loading json data..."; + const std::string json_file(model_dir + "/deploy.json"); + std::ifstream json_in(json_file.c_str(), std::ios::in); + std::string json_data((std::istreambuf_iterator(json_in)), std::istreambuf_iterator()); + json_in.close(); + + #if 0 + if(json_data.find("drp") == json_data.npos && device_type != kDLCPU){ + LOG(INFO) <<"Break! this model is Not for DRP-AI retry as CPU Only"; + return false; + } + #else + if(json_data.find("drp") == json_data.npos && device_type != kDLCPU){ + LOG(INFO) <<"try as CPU Only"; + device_type = kDLCPU; + } + #endif + + LOG(INFO) << "Loading runtime module..."; + tvm::runtime::Module mod_syslib = tvm::runtime::Module::LoadFromFile(model_dir + "/deploy.so"); + mod = (*tvm::runtime::Registry::Get("tvm.graph_executor_debug.create"))( + json_data, mod_syslib, device_type, device_id); + + LOG(INFO) << "Loading parameters..."; + tvm::runtime::PackedFunc load_params = mod.GetFunction("load_params"); + auto params_data = LoadBinary(model_dir + "/deploy.params"); + TVMByteArray params_arr; + params_arr.data = params_data.data(); + params_arr.size = params_data.size(); + load_params(params_arr); + tvm::runtime::PackedFunc set_start_address = mod.GetFunction("set_start_address"); + if(set_start_address != nullptr){ + set_start_address(start_address); + } + return true; +} + +template +void MeraDrpRuntimeWrapper::SetInput(int input_index, const T* data_ptr) { + LOG(INFO) << "Loading input..."; + + tvm::runtime::PackedFunc get_input = mod.GetFunction("get_input"); + tvm::runtime::NDArray xx = get_input(input_index); + auto in_shape = xx.Shape(); + int64_t in_size = 1; + for (unsigned long i = 0; i < in_shape.size(); ++i) { + in_size *= in_shape[i]; + } + + DLDevice ctx; + ctx.device_id = device_id; + 
ctx.device_type = DLDeviceType(device_type); + + auto input_array = tvm::runtime::NDArray::Empty(in_shape, xx.DataType(), ctx); + auto input_data = (T*)(input_array->data); + std::memcpy(input_data, data_ptr, sizeof(T) * in_size); + tvm::runtime::PackedFunc set_input = mod.GetFunction("set_input"); + set_input(input_index, input_array); +} +template void MeraDrpRuntimeWrapper::SetInput(int input_index, const float*); +template void MeraDrpRuntimeWrapper::SetInput(int input_index, const unsigned short*); + +void MeraDrpRuntimeWrapper::Run() { + mod.GetFunction("run")(); +} + +void MeraDrpRuntimeWrapper::ProfileRun(const std::string& profile_table, const std::string& profile_csv) { + tvm::runtime::PackedFunc profile = mod.GetFunction("profile"); + tvm::runtime::Array collectors; + tvm::runtime::profiling::Report report = profile(collectors); + + std::string rep_table = report->AsTable(); + std::ofstream ofs_table (profile_table, std::ofstream::out); + ofs_table << rep_table << std::endl; + ofs_table.close(); + + std::string rep_csv = report->AsCSV(); + std::ofstream ofs_csv (profile_csv, std::ofstream::out); + ofs_csv << rep_csv << std::endl; + ofs_csv.close(); +} + +int MeraDrpRuntimeWrapper::GetNumInput(std::string model_dir) { + // TVM does not support api to get number input of model. 
+ // This function calculate input number base on convention + // of input data name (input_xyz.bin) + DIR *dir; + dirent *diread; + int num_input = 0; + if ((dir = opendir(model_dir.c_str())) != nullptr) { + while ((diread = readdir(dir)) != nullptr) { + std::string file_name(diread->d_name); + if (std::regex_match(file_name, std::regex("(input_)(.*)(bin)") )) { + num_input++; + } + } + closedir(dir); + } else { + LOG(FATAL) << "Can not open model dir : " << model_dir; + } + + return num_input; +} + +InOutDataType MeraDrpRuntimeWrapper::GetInputDataType(int index) { + tvm::runtime::PackedFunc get_input = mod.GetFunction("get_input"); + tvm::runtime::NDArray input_info = get_input(index); + InOutDataType data_type = InOutDataType::OTHER; + if (input_info.DataType().is_float() && input_info.DataType().bits() == 32) { + data_type = InOutDataType::FLOAT32; + } else if (input_info.DataType().is_float() && input_info.DataType().bits() == 16) { + data_type = InOutDataType::FLOAT16; + } + return data_type; + } + +int MeraDrpRuntimeWrapper::GetNumOutput() { + return mod.GetFunction("get_num_outputs")(); + } + +std::tuple MeraDrpRuntimeWrapper::GetOutput(int index) { + tvm::runtime::PackedFunc get_output = mod.GetFunction("get_output"); + tvm::runtime::NDArray out = get_output(index); + int64_t out_size = 1; + for ( unsigned long i = 0; i < out.Shape().size(); ++i) { + out_size *= out.Shape()[i]; + } + + InOutDataType data_type = InOutDataType::OTHER; + if (out.DataType().is_float() && out.DataType().bits() == 32) { + data_type = InOutDataType::FLOAT32; + } else if (out.DataType().is_float() && out.DataType().bits() == 16) { + data_type = InOutDataType::FLOAT16; + } + return std::make_tuple(data_type, reinterpret_cast(out->data), out_size); +} diff --git a/Q01_footfall_counter/src_v2h/MeraDrpRuntimeWrapper.h b/Q01_footfall_counter/src_v2h/MeraDrpRuntimeWrapper.h new file mode 100755 index 0000000..5f1b2be --- /dev/null +++ 
b/Q01_footfall_counter/src_v2h/MeraDrpRuntimeWrapper.h @@ -0,0 +1,52 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * +*/ +#include + +enum class InOutDataType { + FLOAT32, + FLOAT16, + OTHER +}; + +class MeraDrpRuntimeWrapper { + public: + MeraDrpRuntimeWrapper(); + ~MeraDrpRuntimeWrapper(); + + bool LoadModel(const std::string& model_dir, uint32_t start_address); + bool LoadModel(const std::string& model_dir, uint64_t start_address); + template + void SetInput(int input_index, const T* data_ptr); + void Run(); + void ProfileRun(const std::string& profile_table, const std::string& profile_csv); + int GetNumInput(std::string model_dir); + InOutDataType GetInputDataType(int index); + int GetNumOutput(); + + std::tuple GetOutput(int index); + + private: + int device_type; + int device_id; + tvm::runtime::Module mod; +}; diff --git a/Q01_footfall_counter/src_v2h/box.cpp b/Q01_footfall_counter/src_v2h/box.cpp new file mode 100755 index 0000000..1d8eeb9 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/box.cpp @@ -0,0 +1,140 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
+* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : box.cpp +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +#include "box.h" + +/***************************************** +* Function Name : overlap +* Description : Function to compute the overlapped data between coordinate x with size w +* Arguments : x1 = 1-dimensional coordinate of first line +* w1 = size of fist line +* x2 = 1-dimensional coordinate of second line +* w2 = size of second line +* Return value : overlapped line size +******************************************/ +float overlap(float x1, float w1, float x2, float w2) +{ + float l1 = x1 - w1/2; + float l2 = x2 - w2/2; + float left = l1 > l2 ? l1 : l2; + float r1 = x1 + w1/2; + float r2 = x2 + w2/2; + float right = r1 < r2 ? 
r1 : r2; + return right - left; +} + +/***************************************** +* Function Name : box_intersection +* Description : Function to compute the area of intersection of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : area of intersection +******************************************/ +float box_intersection(Box a, Box b) +{ + float w = overlap(a.x, a.w, b.x, b.w); + float h = overlap(a.y, a.h, b.y, b.h); + if(w < 0 || h < 0) + { + return 0; + } + float area = w*h; + return area; +} + +/***************************************** +* Function Name : box_union +* Description : Function to compute the area of union of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : area of union +******************************************/ +float box_union(Box a, Box b) +{ + float i = box_intersection(a, b); + float u = a.w*a.h + b.w*b.h - i; + return u; +} + +/***************************************** +* Function Name : box_iou +* Description : Function to compute the Intersection over Union (IoU) of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : IoU +******************************************/ +float box_iou(Box a, Box b) +{ + return box_intersection(a, b)/box_union(a, b); +} + +/***************************************** +* Function Name : filter_boxes_nms +* Description : Apply Non-Maximum Suppression (NMS) to get rid of overlapped rectangles. 
+* Arguments : det= detected rectangles +* size = number of detections stored in det +* th_nms = threshold for nms +* Return value : - +******************************************/ +void filter_boxes_nms(std::vector &det, int32_t size, float th_nms) +{ + int32_t count = size; + int32_t i = 0; + int32_t j = 0; + Box a; + Box b; + float b_intersection = 0; + for (i = 0; i < count; i++) + { + a = det[i].bbox; + for (j = 0; j < count; j++) + { + if (i == j) + { + continue; + } + if (det[i].c != det[j].c) + { + continue; + } + b = det[j].bbox; + b_intersection = box_intersection(a, b); + if ((box_iou(a, b)>th_nms) || (b_intersection >= a.h * a.w - 1) || (b_intersection >= b.h * b.w - 1)) + { + if (det[i].prob > det[j].prob) + { + det[j].prob= 0; + } + else + { + det[i].prob= 0; + } + } + } + } + return; +} diff --git a/Q01_footfall_counter/src_v2h/box.h b/Q01_footfall_counter/src_v2h/box.h new file mode 100755 index 0000000..65030a1 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/box.h @@ -0,0 +1,73 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. 
TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : box.h +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef BOX_H +#define BOX_H + +#include +#include +#include +#include +#include + +/***************************************** +* Box : Bounding box coordinates and its size +******************************************/ +typedef struct +{ + float x, y, w, h; +} Box; + +/***************************************** +* detection : Detected result +******************************************/ +typedef struct detection +{ + Box bbox; + int32_t c; + float prob; +} detection; + +/***************************************** +* bbox_t : Detected result +******************************************/ +struct bbox_t +{ + std::string name; + int32_t X; + int32_t Y; + int32_t W; + int32_t H; + float pred; +}; +/***************************************** +* Functions 
+******************************************/ +float box_iou(Box a, Box b); +float overlap(float x1, float w1, float x2, float w2); +float box_intersection(Box a, Box b); +float box_union(Box a, Box b); +void filter_boxes_nms(std::vector &det, int32_t size, float th_nms); + +#endif diff --git a/Q01_footfall_counter/src_v2h/define.h b/Q01_footfall_counter/src_v2h/define.h new file mode 100755 index 0000000..38d87b7 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/define.h @@ -0,0 +1,189 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + *  + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. 
+* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : define.h +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef DEFINE_MACRO_H +#define DEFINE_MACRO_H + +/***************************************** +* includes +******************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" + +/***************************************** +* Static Variables for YOLOv3 +* Following variables need to be changed in order to custormize the AI model +* - model_dir = directory name of DRP-AI TVM[*1] Model Object files +******************************************/ +/* Model Binary */ +const static std::string model_dir = "d-yolov3"; +/* Pre-processing Runtime Object */ +const static std::string pre_dir = model_dir + "/preprocess"; +/* Anchor box information */ +const static double anchors[] = +{ + 10, 13, + 16, 30, + 33, 23, + 30, 61, + 62, 45, + 59, 119, + 116, 90, + 156, 198, + 373, 326 +}; +/* Class labels to be classified */ +const static std::string label_list = "coco-labels-2014_2017.txt"; +/* Empty since labels will be loaded from label_list file */ +static std::vector label_file_map = {}; + +/***************************************** +* Macro for YOLOv3 +******************************************/ +/* Number of class to be detected */ +#define NUM_CLASS (80) +/* Number for [region] layer num parameter */ 
+#define NUM_BB (3) +#define NUM_INF_OUT_LAYER (3) +/* Thresholds */ +#define TH_PROB (0.5f) +#define TH_NMS (0.5f) +/* Size of input image to the model */ +#define MODEL_IN_W (416) +#define MODEL_IN_H (416) + +/* Number of grids in the image. The length of this array MUST match with the NUM_INF_OUT_LAYER */ +const static uint8_t num_grids[] = { 13, 26, 52 }; +/* Number of DRP-AI output */ +const static uint32_t INF_OUT_SIZE = (NUM_CLASS + 5) * NUM_BB * num_grids[0] * num_grids[0] + + (NUM_CLASS + 5) * NUM_BB * num_grids[1] * num_grids[1] + + (NUM_CLASS + 5) * NUM_BB * num_grids[2] * num_grids[2]; + +/***************************************** +* Macro for Application +******************************************/ +/*DRP-AI memory area offset for model objects*/ +/*Offset value depends on the size of memory area used by DRP-AI Pre-processing Runtime Object files*/ +#define DRPAI_MEM_OFFSET (0) + +/*Camera Capture Image Information*/ +#define CAM_IMAGE_WIDTH (640) +#define CAM_IMAGE_HEIGHT (480) + +/*DRP-AI Input image information*/ +#define DRPAI_IN_WIDTH (CAM_IMAGE_WIDTH) +#define DRPAI_IN_HEIGHT (CAM_IMAGE_HEIGHT) + +#define IMAGE_OUTPUT_WIDTH (1920) +#define IMAGE_OUTPUT_HEIGHT (1080) +#define IMAGE_CHANNEL_BGRA (4) + +#define DISP_IMAGE_OUTPUT_WIDTH (1480) +#define DISP_IMAGE_OUTPUT_HEIGHT (1050) +#define DISP_OUTPUT_WIDTH (1920) +#define DISP_OUTPUT_HEIGHT (1080) + +/*Waiting Time*/ +#define WAIT_TIME (1000) /* microseconds */ + +/*Timer Related*/ +#define CAPTURE_TIMEOUT (20) /* seconds */ +#define AI_THREAD_TIMEOUT (20) /* seconds */ +#define EXIT_THREAD_TIMEOUT (10) /* seconds */ + +/* DRP_MAX_FREQ and DRPAI_FREQ are the */ +/* frequency settings for DRP-AI. */ +/*Basicallyuse the default values */ + +#define DRP_MAX_FREQ (2) +/* DRP_MAX_FREQ can be set from 2 to 127 */ +/* 2: 420MHz */ +/* 3: 315MHz */ +/* ... 
*/ +/* 127: 9.84MHz */ +/* Calculation Formula: */ +/* 1260MHz /(DRP_MAX_FREQ + 1) */ + +#define DRPAI_FREQ (2) +/* DRPAI_FREQ can be set from 1 to 127 */ +/* 1,2: 1GHz */ +/* 3: 630MHz */ +/* 4: 420MHz */ +/* 5: 315MHz */ +/* ... */ +/* 127: 10MHz */ +/* Calculation Formula: */ +/* 1260MHz /(DRPAI_FREQ - 1) */ +/* (When DRPAI_FREQ = 3 or more.) */ + +#endif diff --git a/Q01_footfall_counter/src_v2h/kalman_box_tracker.cpp b/Q01_footfall_counter/src_v2h/kalman_box_tracker.cpp new file mode 100755 index 0000000..fe1034b --- /dev/null +++ b/Q01_footfall_counter/src_v2h/kalman_box_tracker.cpp @@ -0,0 +1,87 @@ +#include "kalman_box_tracker.h" + +using namespace sort; + +int KalmanBoxTracker::count = 0; + +KalmanBoxTracker::KalmanBoxTracker(const cv::Mat &bbox) +{ + id = KalmanBoxTracker::count; + KalmanBoxTracker::count++; + + kf = std::make_shared(KF_DIM_X, KF_DIM_Z); // no control vector + // state transition matrix (A), x(k) = A*x(k-1) + B*u(k) + w(k) + kf->transitionMatrix = (cv::Mat_(KF_DIM_X, KF_DIM_X) << + 1, 0, 0, 0, 1, 0, 0, + 0, 1, 0, 0, 0, 1, 0, + 0, 0, 1, 0, 0, 0, 1, + 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1); + // measurement matrix (H), z(k) = H*x(k) + v(k) + kf->measurementMatrix = (cv::Mat_(KF_DIM_Z, KF_DIM_X) << + 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0); + // measurement noise covariance matrix (R), K(k) = P`(k)*Ct*inv(C*P`(k)*Ct + R) + kf->measurementNoiseCov = (cv::Mat_(KF_DIM_Z, KF_DIM_Z) << + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 10, 0, + 0, 0, 0, 10); + // posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k) + kf->errorCovPost = (cv::Mat_(KF_DIM_X, KF_DIM_X) << + 10, 0, 0, 0, 0, 0, 0, + 0, 10, 0, 0, 0, 0, 0, + 0, 0, 10, 0, 0, 0, 0, + 0, 0, 0, 10, 0, 0, 0, + 0, 0, 0, 0, 1e4, 0, 0, + 0, 0, 0, 0, 0, 1e4, 0, + 0, 0, 0, 0, 0, 0, 1e4); + // process noise covariance matrix (Q), P'(k) = A*P(k-1)*At + Q + kf->processNoiseCov = 
(cv::Mat_(KF_DIM_X, KF_DIM_X) << + 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 1e-2, 0, 0, + 0, 0, 0, 0, 0, 1e-2, 0, + 0, 0, 0, 0, 0, 0, 1e-4); + // corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k)) + cv::vconcat(convertBBoxToZ(bbox), + cv::Mat(KF_DIM_X - KF_DIM_Z, 1, CV_32F, cv::Scalar(0)), + kf->statePost); +} + + +KalmanBoxTracker::~KalmanBoxTracker() +{ +} + + +cv::Mat KalmanBoxTracker::update(const cv::Mat &bbox) +{ + timeSinceUpdate = 0; + hitStreak += 1; + xPost = kf->correct(convertBBoxToZ(bbox)); + cv::Mat bboxPost = convertXToBBox(xPost); + return bboxPost; +} + + +cv::Mat KalmanBoxTracker::predict() +{ + // bbox area (ds/dt + s) shouldn't be negtive + if (kf->statePost.at(6, 0) + kf->statePost.at(2, 0) <= 0) + kf->statePost.at(6, 0) *= 0; + + cv::Mat xPred = kf->predict(); + cv::Mat bboxPred = convertXToBBox(xPred); + + hitStreak = timeSinceUpdate > 0 ? 0 : hitStreak; + timeSinceUpdate++; + + return bboxPred; +} \ No newline at end of file diff --git a/Q01_footfall_counter/src_v2h/kalman_box_tracker.h b/Q01_footfall_counter/src_v2h/kalman_box_tracker.h new file mode 100755 index 0000000..2251a18 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/kalman_box_tracker.h @@ -0,0 +1,119 @@ +/** + * @desc: kalmanfilter for boundary box tracking. 
+ * opencv kalmanfilter documents: + * https://docs.opencv.org/4.x/dd/d6a/classcv_1_1KalmanFilter.html + * + * @author: lst + * @date: 12/10/2021 + */ +#pragma once + +#include +#include +#include +#include +#include + +#define KF_DIM_X 7 // xc, yc, s, r, dxc/dt, dyc/dt, ds/dt +#define KF_DIM_Z 4 // xc, yc, s, r + +namespace sort +{ + class KalmanBoxTracker + { + // variables + public: + using Ptr = std::shared_ptr; + private: + static int count; + int id; + int timeSinceUpdate = 0; + int hitStreak = 0; + std::shared_ptr kf = nullptr; + cv::Mat xPost; + + // methods + public: + /** + * @brief Kalman filter for bbox tracking + * @param bbox bounding box, Mat(1, 4+) [xc, yc, w, h, ...] + */ + explicit KalmanBoxTracker(const cv::Mat &bbox); + + virtual ~KalmanBoxTracker(); + KalmanBoxTracker(const KalmanBoxTracker&) = delete; + void operator=(const KalmanBoxTracker&) = delete; + + /** + * @brief updates the state vector with observed bbox. + * @param bbox boundary box, Mat(1, 4+) [xc, yc, w, h, ...] + * @return corrected bounding box estimate, Mat(1, 4) + */ + cv::Mat update(const cv::Mat &bbox); + + /** + * @brief advances the state vector and returns the predicted bounding box estimate. + * @return predicted bounding box, Mat(1, 4) + */ + cv::Mat predict(); + + static inline int getFilterCount() + { + return KalmanBoxTracker::count; + } + + inline int getFilterId() + { + return id; + } + + inline int getTimeSinceUpdate() + { + return timeSinceUpdate; + } + + inline int getHitStreak() + { + return hitStreak; + } + + inline cv::Mat getState() + { + return xPost.clone(); + } + + private: + /** + * @brief convert boundary box to measurement. + * @param bbox boundary box (1, 4+) [x center, y center, width, height, ...] 
+ * @return measurement vector (4, 1) [x center; y center; scale/area; aspect ratio] + */ + static inline cv::Mat convertBBoxToZ(const cv::Mat &bbox) + { + assert(bbox.rows == 1 && bbox.cols >= 4); + float x = bbox.at(0, 0); + float y = bbox.at(0, 1); + float s = bbox.at(0, 2) * bbox.at(0, 3); + float r = bbox.at(0, 2) / bbox.at(0, 3); + + return (cv::Mat_(KF_DIM_Z, 1) << x, y, s, r); + } + + /** + * @brief convert state vector to boundary box. + * @param state state vector (7, 1) (x center; y center; scale/area; aspect ratio; ...) + * @return boundary box (1, 4) [x center, y center, width, height] + */ + static inline cv::Mat convertXToBBox(const cv::Mat &state) + { + assert(state.rows == KF_DIM_X && state.cols == 1); + float x = state.at(0, 0); + float y = state.at(1, 0); + float w = sqrt(state.at(2, 0) * state.at(3, 0)); + float h = state.at(2, 0) / w; + + return (cv::Mat_(1, 4) << x, y, w, h); + } + }; +} + diff --git a/Q01_footfall_counter/src_v2h/kuhn_munkres.cpp b/Q01_footfall_counter/src_v2h/kuhn_munkres.cpp new file mode 100755 index 0000000..636c135 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/kuhn_munkres.cpp @@ -0,0 +1,295 @@ +#include "kuhn_munkres.h" + +namespace kuhn_munkres { +using std::max; + +KuhnMunkres::KuhnMunkres() { + +} + +KuhnMunkres::~KuhnMunkres() { + +} + +vector > KuhnMunkres::compute(const Vec2f& costMatrix) { + this->C = KuhnMunkres::padMatrix(costMatrix); + this->n = C.size(); + this->originalLength = costMatrix.size(); + this->originalWidth = costMatrix.size() == 0 ? 
0 : costMatrix[0].size(); + this->rowCovered = Vec1b(n, false); + this->colCovered = Vec1b(n, false); + this->Z0_r = 0; + this->Z0_c = 0; + this->path = Vec2i(n*n, Vec1i(2)); + this->marked = KuhnMunkres::makeMatrix(n, 0); + vector steps = { + nullptr, + &KuhnMunkres::step1, + &KuhnMunkres::step2, + &KuhnMunkres::step3, + &KuhnMunkres::step4, + &KuhnMunkres::step5, + &KuhnMunkres::step6, + }; + + int step = 1; + while (true) { + if (step < 1 || step > 6) break; // done + + StepFunc func = steps[step]; + step = (this->*func)(); + } + + vector > result; + for (int i = 0; i < this->originalLength; ++i) + for (int j = 0; j < this->originalWidth; ++j) + if (this->marked[i][j] == 1) + result.push_back({i, j}); + + return result; +} + +Vec2f KuhnMunkres::makeCostMatrix(const Vec2f& profixMatrix, InversionFunc func) { + if (func == nullptr) { + float maxinum = -__FLT_MAX__; + for (Vec1f row : profixMatrix) + maxinum = std::max(maxinum, *std::max_element(row.begin(), row.end())); + + func = [maxinum](float x)->float { return maxinum - x; }; + } + + Vec2f costMatrix = profixMatrix; + for (int i = 0; i < costMatrix.size(); ++i) { + for (int j = 0; j < costMatrix[i].size(); ++j) { + costMatrix[i][j] = func(costMatrix[i][j]); + } + } + + return costMatrix; +} + +Vec2f KuhnMunkres::padMatrix(const Vec2f& matrix, const float padValue) { + int maxColumns = 0; + int totalRows = matrix.size(); + for (auto row : matrix) maxColumns = max(maxColumns, int(row.size())); + totalRows = max(totalRows, maxColumns); + + Vec2f newMatrix; + for (auto row : matrix) { + auto newRow = row; + while (newRow.size() < totalRows) + // Row too short, pad it. 
+ newRow.push_back(padValue); + newMatrix.push_back(newRow); + } + + while (newMatrix.size() < totalRows) + newMatrix.push_back(Vec1f(totalRows, padValue)); + + return newMatrix; +} + +Vec2i KuhnMunkres::makeMatrix(const int &n, const int &val) { + return Vec2i(n, Vec1i(n, val)); +} + +int KuhnMunkres::step1() { + for (int i = 0; i < this->n; ++i) { + float minVal = *std::min_element(this->C[i].begin(), this->C[i].end()); + // Find the minimum value for this row and substract that mininum + // from every element in the row. + for (int j = 0; j < this->n; ++j) + this->C[i][j] -= minVal; + } + + return 2; +} + +int KuhnMunkres::step2() { + for (int i = 0; i < this->n; ++i) { + for (int j = 0; j < this->n; ++j) { + if (this->C[i][j] == 0 && !this->colCovered[j] && !this->rowCovered[i]) { + this->marked[i][j] = 1; + this->colCovered[j] = true; + this->rowCovered[i] = true; + break; + } + } + } + + clearCovers(); + return 3; +} + +int KuhnMunkres::step3() { + int count = 0; + for (int i = 0; i < this->n; ++i) { + for (int j = 0; j < this->n; ++j) { + if (this->marked[i][j] == 1 and !this->colCovered[j]) { + this->colCovered[j] = true; + count += 1; + } + } + } + + if (count >= this->n) return 7; // done + else return 4; +} + +int KuhnMunkres::step4() { + int row = 0, col = 0, starCol = -1; + while (true) { + auto [r, c] = findAZero(row, col); + row = r, col = c; + if (row < 0) { + return 6; + } else { + this->marked[row][col] = 2; + starCol = findStarInRow(row); + if (starCol >= 0) { + col = starCol; + this->rowCovered[row] = true; + this->colCovered[col] = false; + } else { + this->Z0_r = row; + this->Z0_c = col; + return 5; + } + } + } +} + +int KuhnMunkres::step5() { + int count = 0; + this->path[count][0] = this->Z0_r; + this->path[count][1] = this->Z0_c; + while (true) { + int row = findStarInCol(this->path[count][1]); + if (row >= 0) { + count += 1; + this->path[count][0] = row; + this->path[count][1] = this->path[count - 1][1]; + + int col = 
findPrimeInRow(this->path[count][0]); + count += 1; + this->path[count][0] = this->path[count - 1][0]; + this->path[count][1] = col; + } else { + this->convertPath(path, count); + this->clearCovers(); + this->erasePrimes(); + return 3; + } + } +} + +int KuhnMunkres::step6() { + float minVal = findSmallest(); + int events = 0; // track actual changes to matrix + for (int i = 0; i < this->n; ++i) { + for (int j = 0; j < this->n; ++j) { + if (this->rowCovered[i]) { + this->C[i][j] += minVal; + events += 1; + } + + if (!this->colCovered[j]) { + this->C[i][j] -= minVal; + events += 1; + } + + if (this->rowCovered[i] && !this->colCovered[j]) { + events -= 2; // change reversed, no real difference + } + } + } + + if (events == 0) throw UnsolvableMatrixException(); + + return 4; +} + +float KuhnMunkres::findSmallest() const { + float minVal = __FLT_MAX__; + for (int i = 0; i < this->n; ++i) { + for (int j = 0; j < this->n; ++j) { + if (!this->rowCovered[i] && !this->colCovered[j] && minVal > this->C[i][j]) { + minVal = this->C[i][j]; + } + } + } + + return minVal; +} + +pair KuhnMunkres::findAZero(const int i0, const int j0) const { + int row = -1, col = -1; + int i = i0; + bool done = false; + + while (!done) { + int j = j0; + while (true) { + if (this->C[i][j] == 0 && !this->rowCovered[i] & !this->colCovered[j]) { + row = i; + col = j; + done = true; + } + j = (j + 1) % this->n; + if (j == j0) break; + } + i = (i + 1) % this->n; + if (i == i0) done = true; + } + + return {row, col}; +} + +int KuhnMunkres::findStarInRow(const int row) const { + for (int j = 0; j < this->n; ++j) + if (this->marked[row][j] == 1) + return j; + + return -1; +} + +int KuhnMunkres::findStarInCol(const int col) const { + for (int i = 0; i < this->n; ++i) + if (this->marked[i][col] == 1) + return i; + + return -1; +} + +int KuhnMunkres::findPrimeInRow(const int row) const { + for (int j = 0; j < this->n; ++j) + if (this->marked[row][j] == 2) + return j; + + return -1; +} + +void 
KuhnMunkres::convertPath(const Vec2i& path, const int count) { + for (int i = 0; i < count + 1; ++i) { + if (this->marked[path[i][0]][path[i][1]] == 1) + this->marked[path[i][0]][path[i][1]] = 0; + else + this->marked[path[i][0]][path[i][1]] = 1; + } +} + +void KuhnMunkres::clearCovers() { + for (int i = 0; i < this->n; ++i) { + this->rowCovered[i] = false; + this->colCovered[i] = false; + } +} + +void KuhnMunkres::erasePrimes() { + for (int i = 0; i < this->n; ++i) + for (int j = 0; j < this->n; ++j) + if (this->marked[i][j] == 2) + this->marked[i][j] = 0; +} + +} // namespace kuhn_munkres \ No newline at end of file diff --git a/Q01_footfall_counter/src_v2h/kuhn_munkres.h b/Q01_footfall_counter/src_v2h/kuhn_munkres.h new file mode 100755 index 0000000..e98324f --- /dev/null +++ b/Q01_footfall_counter/src_v2h/kuhn_munkres.h @@ -0,0 +1,213 @@ +/** + * @desc: Kuhn Munkres assignment algorithm + * link: + * https://brc2.com/the-algorithm-workshop/ + * https://github.com/bmc/munkres + * + * @author: lst + * @date: 10/10/2022 + */ +#pragma once + +#include +#include +#include + +namespace kuhn_munkres { + +using std::vector; +using std::pair; +using Vec1f = vector; +using Vec2f = vector; +using Vec1i = vector; +using Vec2i = vector; +using Vec1b = vector; +using InversionFunc = std::function ; + +class UnsolvableMatrixException : public std::exception { + virtual const char* + what() const _GLIBCXX_TXN_SAFE_DYN _GLIBCXX_NOTHROW { + return "Matrix cannot be solved!"; + } +}; + +class KuhnMunkres { +public: + using Ptr = std::shared_ptr; + + // variables + + // methods + KuhnMunkres(); + virtual ~KuhnMunkres(); + KuhnMunkres(const KuhnMunkres&) = delete; + KuhnMunkres& operator=(const KuhnMunkres&) = delete; + + /** + * @brief Compute the indexes for the lowest-cost pairings between rows and + * columns in the database. Returns a list of `(row, column)` tuples + * that can be used to traverse the matrix. + * **WARNING**: This code handles square and rectangular matrices. 
It + * does *not* handle irregular matrices. + * @param costMatrix (list of lists of numbers): The cost matrix. If this + * cost matrix is not square, it will be padded with zeros, via a call + * to `pad_matrix()`. (This method does *not* modify the caller's + * matrix. It operates on a copy of the matrix.) + * @return A list of `(row, column)` tuples that describe the lowest cost path + * through the matrix + */ + vector > compute(const Vec2f& costMatrix); + + /** + * @brief Create a cost matrix from a profit matrix by calling `inversion_function()` + * to invert each value. The inversion function must take one numeric argument + * (of any type) and return another numeric argument which is presumed to be + * the cost inverse of the original profit value. If the inversion function + * is not provided, a given cell's inverted value is calculated as + * `max(matrix) - value`. + * This is a static method. Call it like this: + * auto costMatrix = KuhnMunkres::makeCostMatrix(matrix, inversionFunc) + * For example: + * auto costmatrix = KuhnMunkres::makeCostMatrix(matrix, [](float x)->float { return 1.0f - x; }]) + * @param profitMatrix (list of lists of numbers): The matrix to convert from + * profit to cost values. + * @param func (`function`): The function to use to invert each + * entry in the profit matrix. + * @return cost matrix + */ + static Vec2f makeCostMatrix(const Vec2f& profixMatrix, InversionFunc func=nullptr); + +private: + using StepFunc = int (KuhnMunkres::*)(); + + // variables + Vec2f C; // cost matrix + Vec1b rowCovered, colCovered; + int n = 0, originalLength = 0, originalWidth = 0; + int Z0_r = 0, Z0_c = 0; + Vec2i marked, path; + + // methods + /** + * @brief Pad a possibly non-square matrix to make it square. 
+ * @param matrix matrix to pad + * @param padValue value to use to pad the matrix + * @return a new, possibly padded, matrix + */ + static Vec2f padMatrix(const Vec2f& matrix, const float padValue=0.0); + + /** + * @brief Create an *n*x*n* matrix, populating it with the specific value. + * @param n rows and columns + * @param val value + * @return created matrix + */ + static Vec2i makeMatrix(const int &n, const int &val); + + /** + * @brief For each row of the matrix, find the smallest element and + * substract it from every element in its row. Go to Step 2. + * @return Next step. + */ + int step1(); + + /** + * @brief Find a zero (Z) in the resulting matrix. If there is no starred + * zero in its row or column, star Z. Repeat for each element in the + * matrix. Go to Step 3. + * @return Next step. + */ + int step2(); + + /** + * @brief Cover each column containing a starred zero. If K columns are + * covered, the starred zeros describe a complete set of unique + * assignments. In this case, Go to DONE, otherwise, Go to Step 4. + * @return Next step. + */ + int step3(); + + /** + * @brief Find a noncovered zero and prime it. If there is no starred zero + * in the row containing this primed zero, Go to Step 5. Otherwise, + * cover this row and uncover the column containing the starred + * zero. Continue in this manner until there are no uncovered zeros + * left. Save the smallest uncovered value and Go to Step 6. + * @return Next step. + */ + int step4(); + + /** + * @brief Construct a series of alternating primed and starred zeros as + * follows. Let Z0 represent the uncovered primed zero found in Step 4. + * Let Z1 denote the starred zero in the column of Z0 (if any). + * Let Z2 denote the primed zero in the row of Z1 (there will always + * be one). Continue until the series terminates at a primed zero + * that has no starred zero in its column. 
Unstar each starred zero + * of the series, star each primed zero of the series, erase all + * primes and uncover every line in the matrix. Return to Step 3 + * @return Next step. + */ + int step5(); + + /** + * @brief Add the value found in Step 4 to every element of each covered + * row, and subtract it from every element of each uncovered column. + * Return to Step 4 without altering any stars, primes, or covered + * lines. + * @return Next step. + */ + int step6(); + + /** + * @brief Find the first uncovered element with value 0 + * @return first uncovered element + */ + float findSmallest() const; + + /** + * @brief Find the first uncovered element with value 0 + * @param i0 start row + * @param j0 start column + * @return row and column index. + */ + pair findAZero(const int i0=0, const int j0=0) const; + + /** + * @brief Find the first starred element in the specified row. Returns + * the column index, or -1 if no starred element was found. + * @param row row index + * @return column index. + */ + int findStarInRow(const int row) const; + + /** + * @brief Find the first starred element in the specified row. Returns + * the row index, or -1 if no starred element was found. + * @param col column index + * @return row index. + */ + int findStarInCol(const int col) const; + + /** + * @brief Find the first prime element in the specified row. Returns + * the column index, or -1 if no starred element was found. 
+ * @param row row index + * @return column index + */ + int findPrimeInRow(const int row) const; + + void convertPath(const Vec2i& path, const int count); + + /** + * @brief Clear all covered matrix cells + */ + void clearCovers(); + + /** + * @brief Erase all prime markings + */ + void erasePrimes(); +}; + +} // namespace kuhn_munkres \ No newline at end of file diff --git a/Q01_footfall_counter/src_v2h/main.cpp b/Q01_footfall_counter/src_v2h/main.cpp new file mode 100755 index 0000000..44690a7 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/main.cpp @@ -0,0 +1,1515 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + *  + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. 
This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : main.cpp +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +/*DRP-AI TVM[*1] Runtime*/ +#include "MeraDrpRuntimeWrapper.h" +/*Definition of Macros & other variables*/ +#include "define.h" +/*box drawing*/ +#include "box.h" +/*Double click termination*/ +#include "utils.h" +/*Wayland control*/ +#include "wayland.h" + +#include "sort.h" + +/***************************************** +* Global Variables +******************************************/ +std::map input_source_map = +{ + {"USB", 1} +}; + +/*Multithreading*/ +static sem_t terminate_req_sem; +static pthread_t ai_inf_thread; +static pthread_t capture_thread; +static pthread_t exit_thread; +static pthread_t kbhit_thread; +static std::mutex mtx; + +/*Flags*/ +static std::atomic inference_start (0); +static std::atomic img_obj_ready (0); + +/*Global Variables*/ +static float drpai_output_buf[INF_OUT_SIZE]; + +/*AI Inference for DRPAI*/ +/* DRP-AI TVM[*1] Runtime object */ +MeraDrpRuntimeWrapper runtime; + +/* Sets a flag to indicate whether a double click has been detected. */ +bool doubleClick = false; + +/* DRP_MAX_FREQ and DRPAI_FREQ are the */ +/* frequency settings for DRP-AI. 
*/ +/* Basically use the default values */ +static int32_t drp_max_freq; +static int32_t drpai_freq; + +static float pre_time = 0; +static float post_time = 0; +static float ai_time = 0; +static float total_time = 0; +static std::vector det; + +cv::Mat yuyv_image; +cv::Mat input_image; +std::unordered_map> ini_values; +std::vector detection_object_vector; + +static cv::Mat trackerbbox = cv::Mat(0, 6, CV_32F); +static std::vector bbox; +int pointx1, pointy1, pointx2, pointy2, actual_count = 0; +std::vector polygon; +static float conf = 0; + +/* Wayland object */ +static Wayland wayland; + +/***************************************** +* Function Name : float16_to_float32 +* Description : Function by Edge cortex. Cast uint16_t a into float value. +* Arguments : a = uint16_t number +* Return value : float = float32 number +******************************************/ +float float16_to_float32(uint16_t a) +{ + return __extendXfYf2__(a); +} + +/***************************************** +* Function Name : time_difference_msec +* Description : compute the time differences in ms between two moments +* Arguments : t0 = start time +* t1 = stop time +* Return value : the time difference in ms +******************************************/ +static double time_difference_msec(struct timespec t0, struct timespec t1) +{ + return (t1.tv_sec - t0.tv_sec) * 1000.0 + (t1.tv_nsec - t0.tv_nsec) / 1000000.0; +} + +/***************************************** +* Function Name : wait_join +* Description : waits for a fixed amount of time for the thread to exit +* Arguments : p_join_thread = thread that the function waits for to Exit +* join_time = the timeout time for the thread for exiting +* Return value : 0 if successful +* not 0 otherwise +******************************************/ +static int8_t wait_join(pthread_t *p_join_thread, uint32_t join_time) +{ + int8_t ret_err; + struct timespec join_timeout; + ret_err = clock_gettime(CLOCK_REALTIME, &join_timeout); + if ( 0 == ret_err ) + { + 
join_timeout.tv_sec += join_time; + ret_err = pthread_timedjoin_np(*p_join_thread, NULL, &join_timeout); + } + return ret_err; +} + +/***************************************** +* Function Name : config_read +* Description : Read configuration from the config.ini file. +******************************************/ +void config_read() +{ + /*Open config.ini file*/ + std::ifstream ini_file("config.ini"); + std::string line; + std::string current_section; + /*parsing ini file*/ + while (std::getline(ini_file, line)) + { + auto comment_pos = line.find(";"); + if (comment_pos != std::string::npos) + { + line.erase(comment_pos); + } + line.erase(0, line.find_first_not_of(" \t\r\n")); + line.erase(line.find_last_not_of(" \t\r\n") + 1); + if (line.empty()) + { + continue; + } + else if (line[0] == '[') + { + current_section = line.substr(1, line.size() - 2); + } + else + { + auto delimiter_pos = line.find("="); + std::string key = line.substr(0, delimiter_pos); + std::string value = line.substr(delimiter_pos + 1); + ini_values[current_section][key] = value; + } + } + return; +} + +/***************************************** +* Function Name : load_label_file +* Description : Load label list text file and return the label list that contains the label. +* Arguments : label_file_name = filename of label list. 
must be in txt format +* Return value : vector list = list contains labels +* empty if error occurred +******************************************/ +std::vector load_label_file(std::string label_file_name) +{ + std::vector list = {}; + std::vector empty = {}; + std::ifstream infile(label_file_name); + + if (!infile.is_open()) + { + return list; + } + + std::string line = ""; + while (getline(infile,line)) + { + list.push_back(line); + if (infile.fail()) + { + return empty; + } + } + + return list; +} + +/***************************************** +* Function Name : get_result +* Description : Get DRP-AI Output from memory via DRP-AI Driver +* Arguments : drpai_fd = file descriptor of DRP-AI Driver +* output_addr = memory start address of DRP-AI output +* output_size = output data size +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t get_result() +{ + int8_t ret = 0; + int32_t i = 0; + int32_t output_num = 0; + std::tuple output_buffer; + int64_t output_size; + uint32_t size_count = 0; + + /* Get the number of output of the target model. */ + output_num = runtime.GetNumOutput(); + size_count = 0; + /*GetOutput loop*/ + for (i = 0;i(output_buffer). 
*/ + output_size = std::get<2>(output_buffer); + + /*Output Data Type = std::get<0>(output_buffer)*/ + if (InOutDataType::FLOAT16 == std::get<0>(output_buffer)) + { + /*Output Data = std::get<1>(output_buffer)*/ + uint16_t* data_ptr = reinterpret_cast(std::get<1>(output_buffer)); + for (int j = 0; j(output_buffer)) + { + /*Output Data = std::get<1>(output_buffer)*/ + float* data_ptr = reinterpret_cast(std::get<1>(output_buffer)); + for (int j = 0; j rgb_images; + cv::split(image, rgb_images); + cv::Mat m_flat_r = rgb_images[0].reshape(1, 1); + cv::Mat m_flat_g = rgb_images[1].reshape(1, 1); + cv::Mat m_flat_b = rgb_images[2].reshape(1, 1); + cv::Mat matArray[] = {m_flat_r, m_flat_g, m_flat_b}; + cv::Mat flat_image; + cv::hconcat(matArray, 3, flat_image); + return flat_image; +} + +/***************************************** +* Function Name : R_Post_Proc +* Description : Process CPU post-processing for YOLOv3 +* Arguments : floatarr = drpai output address +* Return value : - +******************************************/ +void R_Post_Proc(float* floatarr) +{ + /* Following variables are required for correct_region_boxes in Darknet implementation*/ + /* Note: This implementation refers to the "darknet detector test" */ + mtx.lock(); + float new_w, new_h; + float correct_w = 1.; + float correct_h = 1.; + if ((float) (MODEL_IN_W / correct_w) < (float) (MODEL_IN_H/correct_h) ) + { + new_w = (float) MODEL_IN_W; + new_h = correct_h * MODEL_IN_W / correct_w; + } + else + { + new_w = correct_w * MODEL_IN_H / correct_h; + new_h = MODEL_IN_H; + } + + int32_t n = 0; + int32_t b = 0; + int32_t y = 0; + int32_t x = 0; + int32_t offs = 0; + int32_t i = 0; + float tx = 0; + float ty = 0; + float tw = 0; + float th = 0; + float tc = 0; + float center_x = 0; + float center_y = 0; + float box_w = 0; + float box_h = 0; + float objectness = 0; + uint8_t num_grid = 0; + uint8_t anchor_offset = 0; + float classes[NUM_CLASS]; + float max_pred = 0; + int32_t pred_class = -1; + float 
probability = 0; + detection d; + /* Clear the detected result list */ + det.clear(); + + /*Post Processing Start*/ + for (n = 0; n < NUM_INF_OUT_LAYER; n++) + { + num_grid = num_grids[n]; + anchor_offset = 2 * NUM_BB * (NUM_INF_OUT_LAYER - (n + 1)); + + for(b = 0; b < NUM_BB; b++) + { + for(y = 0; y < num_grid; y++) + { + for(x = 0; x < num_grid; x++) + { + offs = yolo_offset(n, b, y, x); + tx = floatarr[offs]; + ty = floatarr[yolo_index(n, offs, 1)]; + tw = floatarr[yolo_index(n, offs, 2)]; + th = floatarr[yolo_index(n, offs, 3)]; + tc = floatarr[yolo_index(n, offs, 4)]; + /* Compute the bounding box */ + /*get_region_box*/ + center_x = ((float) x + sigmoid(tx)) / (float) num_grid; + center_y = ((float) y + sigmoid(ty)) / (float) num_grid; + box_w = (float) exp(tw) * anchors[anchor_offset+2*b+0] / (float) MODEL_IN_W; + box_h = (float) exp(th) * anchors[anchor_offset+2*b+1] / (float) MODEL_IN_W; + /* Adjustment for VGA size */ + /* correct_region_boxes */ + center_x = (center_x - (MODEL_IN_W - new_w) / 2. / MODEL_IN_W) / ((float) new_w / MODEL_IN_W); + center_y = (center_y - (MODEL_IN_H - new_h) / 2. 
/ MODEL_IN_H) / ((float) new_h / MODEL_IN_H); + box_w *= (float) (MODEL_IN_W / new_w); + box_h *= (float) (MODEL_IN_H / new_h); + center_x = round(center_x * DRPAI_IN_WIDTH); + center_y = round(center_y * DRPAI_IN_HEIGHT); + box_w = round(box_w * DRPAI_IN_WIDTH); + box_h = round(box_h * DRPAI_IN_HEIGHT); + objectness = sigmoid(tc); + Box bb = {center_x, center_y, box_w, box_h}; + /* Get the class prediction */ + for (i = 0; i < NUM_CLASS; i++) + { + classes[i] = sigmoid(floatarr[yolo_index(n, offs, 5+i)]); + } + max_pred = 0; + pred_class = -1; + for (i = 0; i < NUM_CLASS; i++) + { + if (classes[i] > max_pred) + { + pred_class = i; + max_pred = classes[i]; + } + } + /* Store the result into the list if the probability is more than the threshold */ + probability = max_pred * objectness; + if (probability > TH_PROB) + { + d = {bb, pred_class, probability}; + det.push_back(d); + } + } + } + } + } + /* Non-Maximum Supression filter */ + filter_boxes_nms(det, det.size(), TH_NMS); + + bbox.clear(); + trackerbbox = cv::Mat(0, 6, CV_32F); + for (detection detect : det) + { + bbox_t dat; + if (detect.prob < conf) + continue; + dat.name = label_file_map[detect.c].c_str(); + if (count(detection_object_vector.begin(), detection_object_vector.end(), dat.name) == 0) + continue; + + dat.X = (int32_t)(detect.bbox.x - (detect.bbox.w / 2)); + dat.Y = (int32_t)(detect.bbox.y - (detect.bbox.h / 2)); + dat.W = (int32_t)detect.bbox.w; + dat.H = (int32_t)detect.bbox.h; + bbox.emplace_back(dat); + cv::Mat bbox = (cv::Mat_(1, 6) << dat.X, dat.Y, dat.W, dat.H, detect.prob, detect.c); + cv::vconcat(trackerbbox, bbox, trackerbbox); + } + mtx.unlock(); + return ; +} + +/***************************************** +* Function Name : check_above_or_below +* Description : check the given point is above or below the line, +* added a small value of 1e-8 to avoid division by zero +* Arguments : x = x coordinate of the point +* y = y coordinate of the point +* Return value : int location of the point 
w.r.t the line +******************************************/ +int check_above_or_below(int x, int y) +{ + double m = (pointy2 - pointy1) / ((pointx2 - pointx1) + 0.00000006); + double yline = m * (x - pointx1) + pointy1; + if (y > yline) + return 1; + else + return 0; +} + +/***************************************** +* Function Name : check_inside_rectangle +* Description : check the given point is inside the region defined using the polygon, +* Arguments : x = x coordinate of the point +* y = y coordinate of the point +* Return value : bool location of the point w.r.t the line +******************************************/ +bool check_inside_rectangle(int x, int y) +{ + int result = pointPolygonTest(polygon, cv::Point(x, y), false); + if (result == 1) + return 1; + else + return 0; +} + +/***************************************** +* Function Name : R_Inf_Thread +* Description : Executes the DRP-AI inference thread +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Inf_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t inf_sem_check = 0; + + /*Inference Variables*/ + fd_set rfds; + struct timespec tv; + int8_t inf_status = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + /*Variable for Performance Measurement*/ + static struct timespec start_time; + static struct timespec inf_end_time; + static struct timespec pre_start_time; + static struct timespec pre_end_time; + static struct timespec post_start_time; + static struct timespec post_end_time; + + printf("\n[INFO] Inference Thread Starting\n"); + + /*Inference Loop Start*/ + while(1) + { + while(1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed without issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &inf_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != inf_sem_check) + { + goto ai_inf_end; + } + /*Checks if image frame from Capture Thread is ready.*/ + if (inference_start.load()) + { + break; + } + usleep(WAIT_TIME); + } + + /*Gets Pre-process starting time*/ + ret = timespec_get(&pre_start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Pre-process Start Time\n"); + goto err; + } + + cv::Size size(MODEL_IN_H, MODEL_IN_W); + cv::resize(input_image, input_image, size); + cv::cvtColor(input_image, input_image, cv::COLOR_BGR2RGB); + input_image = hwc2chw(input_image); + input_image.convertTo(input_image, CV_32FC3,1.0 / 255.0, 0); + if (!input_image.isContinuous()) + input_image = input_image.clone(); + + ret = timespec_get(&pre_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Pre-process End Time\n"); + goto err; + } + + /*Set Pre-processing output to be inference input. 
*/ + runtime.SetInput(0, input_image.ptr()); + + /*Pre-process Time Result*/ + pre_time = (float)((time_difference_msec(pre_start_time, pre_end_time))); + + /*Gets inference starting time*/ + ret = timespec_get(&start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Inference Start Time\n"); + goto err; + } + + runtime.Run(); + + /*Gets AI Inference End Time*/ + ret = timespec_get(&inf_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Inference End Time\n"); + goto err; + } + /*Inference Time Result*/ + ai_time = (float)((time_difference_msec(start_time, inf_end_time))); + + /*Gets Post-process starting time*/ + ret = timespec_get(&post_start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Post-process Start Time\n"); + goto err; + } + /*Process to read the DRPAI output data.*/ + ret = get_result(); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get result from memory.\n"); + goto err; + } + + /*CPU Post-Processing For YOLOv3*/ + R_Post_Proc(drpai_output_buf); + /*Gets Post-process End Time*/ + ret = timespec_get(&post_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Post-process End Time\n"); + goto err; + } + /*Post-process Time Result*/ + post_time = (float)((time_difference_msec(post_start_time, post_end_time))); + total_time = pre_time + ai_time + post_time; + inference_start.store(0); + } + /*End of Inference Loop*/ + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto ai_inf_end; +/*AI Thread Termination*/ +ai_inf_end: + /*To terminate the loop in Capture Thread.*/ + printf("[INFO] AI Inference Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Kbhit_Thread +* Description : Executes the Keyboard hit thread (checks if enter key is hit) +* Arguments : threadid = thread identification +* Return value : 
- +******************************************/ +void *R_Kbhit_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t kh_sem_check = 0; + /*Variable to store the getchar() value*/ + int32_t c = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + + devices dev; + printf("[INFO] Key Hit Thread Starting\n"); + + printf("\n\n************************************************\n"); + printf("* Press ENTER key to quit. *\n"); + printf("************************************************\n"); + + /*Set Standard Input to Non Blocking*/ + errno = 0; + ret = fcntl(0, F_SETFL, O_NONBLOCK); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to run fctnl(): errno=%d\n", errno); + goto err; + } + + while(1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed without issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &kh_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != kh_sem_check) + { + goto key_hit_end; + } + + c = getchar(); + if (EOF != c) + { + /* When key is pressed. */ + printf("[INFO] key Detected. !!!\n"); + goto err; + } + else + { + /* When nothing is pressed. 
*/ + usleep(WAIT_TIME); + } + } + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto key_hit_end; + +key_hit_end: + printf("[INFO] Key Hit Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_exit_Thread +* Description : Executes the double click exit thread +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_exit_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t kh_sem_check = 0; + + /*Variable for checking return value*/ + int8_t ret = 0; + devices dev; + + printf("[INFO] Exit Thread Starting\n"); + /*Set Standard Input to Non Blocking*/ + errno = 0; + ret = fcntl(0, F_SETFL, O_NONBLOCK); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to run fctnl(): errno=%d\n", errno); + goto err; + } + + while(1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed without issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &kh_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != kh_sem_check) + { + goto exit_end; + } + dev.detect_mouse_click(); + if (doubleClick) + { + goto err; + } + } + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto exit_end; + +exit_end: + printf("[INFO] Exit Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Capture_Thread +* Description : Executes the V4L2 capture with Capture thread. 
+* Arguments : cap_pipeline = g-streamer pipeline +* Return value : - +******************************************/ +void *R_Capture_Thread(void *cap_pipeline) +{ + std::string &gstream = *(static_cast(cap_pipeline)); + /*Semaphore Variable*/ + int32_t capture_sem_check = 0; + int8_t ret = 0; + cv::Mat g_frame; + cv::Mat raw_frame; + cv::VideoCapture g_cap; + + printf("[INFO] Capture Thread Starting\n"); + + g_cap.open(gstream, cv::CAP_GSTREAMER); + if (!g_cap.isOpened()) + { + std::cout << "[ERROR] Error opening video stream or camera !\n" + << std::endl; + goto err; + } + /* Set camera resolution */ + /* set width */ + g_cap.set(cv::CAP_PROP_FRAME_WIDTH, 640); + /* set height */ + g_cap.set(cv::CAP_PROP_FRAME_HEIGHT, 480); + while(1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed without issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &capture_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != capture_sem_check) + { + goto capture_end; + } + + g_cap >> g_frame; + /* Breaking the loop if no video frame is detected */ + if (g_frame.empty()) + { + std::cout << "[INFO] Video ended or corrupted frame !\n"; + goto capture_end; + } + else + { + if (!inference_start.load()) + { + + input_image = g_frame.clone(); + inference_start.store(1); /* Flag for AI Inference Thread. */ + } + + if (!img_obj_ready.load()) + { + yuyv_image = g_frame.clone(); + img_obj_ready.store(1); /* Flag for Main Thread. 
*/ + } + } + } /*End of Loop*/ + +/*Error Processing*/ +err: + sem_trywait(&terminate_req_sem); + goto capture_end; + +capture_end: + /*To terminate the loop in AI Inference Thread.*/ + inference_start.store(1); + + printf("[INFO] Capture Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** + * Function Name : create_output_frame + * Description : create the output frame with space for displaying inference details + * Arguments : cv::Mat frame_g, input frame to be displayed in the background + * Return value : cv::Mat background, final display frame to be written to g-streamer pipeline + *****************************************/ +cv::Mat create_output_frame(cv::Mat frame_g) +{ + /* Create a black background image of size 1080x720 */ + cv::Mat background(DISP_OUTPUT_HEIGHT, DISP_OUTPUT_WIDTH, frame_g.type(), cv::Scalar(0, 0, 0)); + /* Resize the original image to fit within 960x720 */ + cv::Mat resizedImage; + cv::resize(frame_g, resizedImage, cv::Size(DISP_IMAGE_OUTPUT_WIDTH, DISP_IMAGE_OUTPUT_HEIGHT)); + /* Copy the resized image to the left side of the background (0 to 960) */ + cv::Rect roi(cv::Rect(0, 0, resizedImage.cols, resizedImage.rows)); + resizedImage.copyTo(background(roi)); + return background; +} + +/***************************************** +* Function Name : R_Main_Process +* Description : Runs the main process loop +* Arguments : - +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t R_Main_Process() +{ + /*Main Process Variables*/ + int8_t main_ret = 0; + /*Semaphore Related*/ + int32_t sem_check = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + /* wayland Index = 0 */ + uint32_t idx = 0; + config_read(); + uint8_t img_buf_id; + std::map id_time; + std::set unique_ids = {}; + std::map location_history; + long int infer_time_ms; + cv::Mat bgra_image; + std::stringstream stream; + std::string result_str; + int8_t region_count; + int 
tracker_id; + int class_id; + std::string class_name; + int8_t kmin = stoi(ini_values["tracking"]["kmin"]); + conf = stof(ini_values["tracking"]["conf"]); + /*set point 1*/ + pointx1 = stoi(ini_values["line"]["x1"]); + pointy1 = stoi(ini_values["line"]["y1"]); + /*set point 2*/ + pointx2 = stoi(ini_values["line"]["x2"]); + pointy2 = stoi(ini_values["line"]["y2"]); + + polygon = { + cv::Point(stoi(ini_values["region"]["x1"]), stoi(ini_values["region"]["y1"])), + cv::Point(stoi(ini_values["region"]["x2"]), stoi(ini_values["region"]["y2"])), + cv::Point(stoi(ini_values["region"]["x3"]), stoi(ini_values["region"]["y3"])), + cv::Point(stoi(ini_values["region"]["x4"]), stoi(ini_values["region"]["y4"])) + }; + + std::string detection_object_string = ini_values["tracking"]["objects"]; + std::stringstream detection_object_ss(detection_object_string); + std::string item; + while (std::getline(detection_object_ss, item, ',')) + { + detection_object_vector.push_back(item); + } + std::cout << "*******************Tracking/Detection Parameters*******************" << std::endl; + std::cout << "\n[INFO] Selected objects to track\n\n"; + for (const auto &item : detection_object_vector) + std::cout<< item << std::endl; + + std::string DISPLAY_TEXT = ini_values["display"]["display_text"]; + std::string DISPLAY_REGION_TEXT = ini_values["display"]["region_display_text"]; + std::string g_pipeline = "appsrc ! videoconvert ! 
autovideosink sync=false "; + float font_size = .9; + float font_weight = 2; + float font_size_dt = 0.75; + float font_size_bb = 0.5; + float font_weight_bb = 1; + if (DISPLAY_TEXT.size() > 20 || DISPLAY_REGION_TEXT.size() > 20) + { + font_size_dt = .45; + } + + sort::Sort::Ptr mot = std::make_shared(1, 3, 0.3f); + cv::namedWindow("Object Tracker", cv::WINDOW_NORMAL); + cv::setWindowProperty("Object Tracker", cv::WND_PROP_FULLSCREEN, cv::WINDOW_FULLSCREEN); + + /* Initialize wayland */ + ret = wayland.init(idx, IMAGE_OUTPUT_WIDTH, IMAGE_OUTPUT_HEIGHT, IMAGE_CHANNEL_BGRA); + if(0 != ret) + { + fprintf(stderr, "[ERROR] Failed to initialize Image for Wayland\n"); + goto err; + } + printf("\n[INFO] Main Loop Starts\n"); + while(1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != sem_check) + { + goto main_proc_end; + } + /* Check img_obj_ready flag which is set in Capture Thread. 
*/ + if (img_obj_ready.load()) + { + bgra_image = yuyv_image; + infer_time_ms = total_time; + cv::Mat tracks = mot->update(trackerbbox); + region_count = 0; + /* result tracks */ + for (int i = 0; i < tracks.rows; ++i) + { + bbox_t dat; + tracker_id = int(tracks.at(i, 8)); + class_id = int(tracks.at(i, 5)); + class_name = label_file_map[class_id]; + dat.X = tracks.at(i, 0); + dat.Y = tracks.at(i, 1); + dat.W = tracks.at(i, 2); + dat.H = tracks.at(i, 3); + if (id_time.find(tracker_id) == id_time.end()) + { + dat.name = class_name + " Id : " + std::to_string(tracker_id); + } + else + { + dat.name = class_name + " Id: " + std::to_string(tracker_id) + " Time: " + std::to_string(id_time[tracker_id] / 1000); + } + int s = check_above_or_below((int)(dat.X + dat.W / 2), (int)(dat.Y + dat.H)); + if (s) + { + if (location_history.find(tracker_id) == location_history.end()) + location_history[tracker_id] = s; + else + { + if (!location_history[tracker_id]) + { + actual_count++; + } + location_history[tracker_id] = s; + } + } + else + { + if (location_history.find(tracker_id) == location_history.end()) + location_history[tracker_id] = s; + else + { + if (location_history[tracker_id]) + { + actual_count--; + if(actual_count < 0) + actual_count = 0; + } + location_history[tracker_id] = s; + } + } + bool is_in_rect = check_inside_rectangle((int)(dat.X + dat.W / 2), (int)(dat.Y + dat.H)); + if (is_in_rect) + { + region_count++; + if (id_time.find(tracker_id) == id_time.end()) + { + id_time[tracker_id] = infer_time_ms; + } + else + { + id_time[tracker_id] += infer_time_ms; + } + } + if (dat.Y < 20){ + dat.Y = 20; + } + cv::Rect rect(dat.X, dat.Y, dat.W, dat.H); + cv::rectangle(bgra_image, rect, cv::Scalar(0, 255, 0), 1.5); + font_weight_bb = 1; + font_size_bb = 0.5; + cv::Size text_size = cv::getTextSize(dat.name, cv::FONT_HERSHEY_SIMPLEX, font_size_bb, font_weight_bb, 0); + if (text_size.width > dat.W) + { + font_size_bb = 0.3; + text_size = cv::getTextSize(dat.name, 
cv::FONT_HERSHEY_SIMPLEX, font_size_bb, font_weight_bb, 0); + } + cv::Rect rect_text_box(dat.X, dat.Y - 20, text_size.width + 20, 20); + cv::rectangle(bgra_image, rect_text_box, cv::Scalar(0, 255, 0), cv::FILLED); + cv::putText(bgra_image, dat.name, cv::Point(dat.X + 10, dat.Y - 7), cv::FONT_HERSHEY_SIMPLEX, font_size_bb, cv::Scalar(0, 0, 0), font_size_bb, cv::LINE_AA); + } + cv::line(bgra_image, cv::Point(pointx1, pointy1), cv::Point(pointx2, pointy2), cv::Scalar(0, 0, 255), 4); + cv::polylines(bgra_image, polygon, true, cv::Scalar(0, 255, 0), 2); + bgra_image = create_output_frame(bgra_image); + cv::putText(bgra_image, "Preprocess Time : " + std::to_string(int(pre_time)), cv::Point(1500, 60), cv::FONT_HERSHEY_SIMPLEX, font_size, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + cv::putText(bgra_image, "AI Inference Time : " + std::to_string(int(ai_time)), cv::Point(1503, 95), cv::FONT_HERSHEY_SIMPLEX, font_size, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + cv::putText(bgra_image, "Postprocess Time : " + std::to_string(int(post_time)), cv::Point(1500, 127), cv::FONT_HERSHEY_SIMPLEX, font_size, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + cv::putText(bgra_image, DISPLAY_TEXT + " : " + std::to_string(actual_count), cv::Point(1500, 180), cv::FONT_HERSHEY_SIMPLEX, font_size_dt, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + cv::putText(bgra_image, DISPLAY_REGION_TEXT + " : " + std::to_string(region_count), cv::Point(1500, 210), cv::FONT_HERSHEY_SIMPLEX, font_size_dt, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + cv::putText(bgra_image, "Objects Detected : ", cv::Point(1500, 270), cv::FONT_HERSHEY_SIMPLEX, font_size, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + mtx.lock(); + for (int i = 0; i < bbox.size(); i++) + { + cv::putText(bgra_image, bbox[i].name, cv::Point(1510, (320 + (i * 30))), cv::FONT_HERSHEY_SIMPLEX, font_size, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + } + mtx.unlock(); + 
cv::cvtColor(bgra_image,bgra_image,cv::COLOR_BGR2BGRA); + /*Update Wayland*/ + wayland.commit(bgra_image.data,NULL); + img_obj_ready.store(0); + } + /*Wait for 1 TICK.*/ + usleep(WAIT_TIME); + } + +/*Error Processing*/ +err: + sem_trywait(&terminate_req_sem); + main_ret = 1; + goto main_proc_end; +/*Main Processing Termination*/ +main_proc_end: + /*To terminate the loop in Capture Thread.*/ + img_obj_ready.store(0); + printf("[INFO] Main Process Terminated\n"); + return main_ret; +} + +/***************************************** +* Function Name : get_drpai_start_addr +* Description : Get DRP-AI Memory Area Address via DRP-AI Driver +* Arguments : int drpai_fd +* Return value : drpai start address +******************************************/ +uint64_t get_drpai_start_addr(int drpai_fd) +{ + int fd = 0; + int ret = 0; + drpai_data_t drpai_data; + errno = 0; + + fd = open("/dev/drpai0", O_RDWR); + if (0 > fd ) + { + LOG(FATAL) << "[ERROR] Failed to open DRP-AI Driver : errno=" << errno; + return (uint32_t)NULL; + } + + /* Get DRP-AI Memory Area Address via DRP-AI Driver */ + ret = ioctl(fd , DRPAI_GET_DRPAI_AREA, &drpai_data); + if (-1 == ret) + { + LOG(FATAL) << "[ERROR] Failed to get DRP-AI Memory Area : errno=" << errno ; + return (uint32_t)NULL; + } + + return drpai_data.address; +} + +/***************************************** + * Function Name : query_device_status + * Description : function to check USB/MIPI device is connectod. + * Return value : media_port, media port that device is connectod. + ******************************************/ +std::string query_device_status(std::string device_type) +{ + std::string media_port = ""; + /* Linux command to be executed */ + const char* command = "v4l2-ctl --list-devices"; + /* Open a pipe to the command and execute it */ + FILE* pipe = popen(command, "r"); + if (!pipe) + { + std::cerr << "[ERROR] Unable to open the pipe." 
<< std::endl; + return media_port; + } + /* Read the command output line by line */ + char buffer[128]; + size_t found; + while (fgets(buffer, sizeof(buffer), pipe) != nullptr) + { + std::string response = std::string(buffer); + found = response.find(device_type); + if (found != std::string::npos) + { + fgets(buffer, sizeof(buffer), pipe); + media_port = std::string(buffer); + pclose(pipe); + /* return media port*/ + return media_port; + } + } + pclose(pipe); + /* return media port*/ + return media_port; +} + +/***************************************** +* Function Name : set_drpai_freq +* Description : Function to set the DRP and DRP-AI frequency. +* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int set_drpai_freq(int drpai_fd) +{ + int ret = 0; + uint32_t data; + + errno = 0; + data = drp_max_freq; + ret = ioctl(drpai_fd , DRPAI_SET_DRP_MAX_FREQ, &data); + if (-1 == ret) + { + std::cerr << "[ERROR] Failed to set DRP Max Frequency : errno=" << errno << std::endl; + return -1; + } + + errno = 0; + data = drpai_freq; + ret = ioctl(drpai_fd , DRPAI_SET_DRPAI_FREQ, &data); + if (-1 == ret) + { + std::cerr << "[ERROR] Failed to set DRP-AI Frequency : errno=" << errno << std::endl; + return -1; + } + + return 0; +} + +/***************************************** +* Function Name : init_drpai +* Description : Function to initialize DRP-AI. +* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : If non-zero, DRP-AI memory start address. +* 0 is failure. 
+******************************************/ +uint64_t init_drpai(int drpai_fd) +{ + int ret = 0; + uint64_t drpai_addr = 0; + + /*Get DRP-AI memory start address*/ + drpai_addr = get_drpai_start_addr(drpai_fd); + if (drpai_addr == 0) + { + return 0; + } + + /*Set DRP-AI frequency*/ + ret = set_drpai_freq(drpai_fd); + if (ret != 0) + { + return 0; + } + + return drpai_addr; +} + +int32_t main(int32_t argc, char * argv[]) +{ + int8_t main_proc = 0; + int8_t ret_main = 0; + int8_t ret = 0; + + /*Multithreading Variables*/ + int32_t create_thread_ai = -1; + int32_t create_thread_key = -1; + int32_t create_thread_exit = -1; + int32_t create_thread_capture = -1; + int32_t sem_create = -1; + + InOutDataType input_data_type; + bool runtime_status = false; + std::string gstreamer_pipeline; + + /*Disable OpenCV Accelerator due to the use of multithreading */ + unsigned long OCA_list[16]; + for (int i=0; i < 16; i++) OCA_list[i] = 0; + OCA_Activate( &OCA_list[0] ); + + if (argc < 2) + { + std::cout << "[ERROR] Please specify Input Source\n"; + std::cout << "[INFO] usage: ./object_tracker USB.\n"; + std::cout << "[INFO] End Application.\n"; + return -1; + } + std::string input_source = argv[1]; + switch (input_source_map[input_source]) + { + /* Input Source : USB Camera */ + case 1: + { + std::cout << "[INFO] USB CAMERA \n"; + std::string media_port = query_device_status("usb"); + gstreamer_pipeline = "v4l2src device=" + media_port + " ! videoconvert ! 
appsink"; + } + break; + default: + { + std::cout << "[ERROR] Invalid Input source\n"; + std::cout << "[INFO] End Application.\n"; + return -1; + } + } + /* DRP-AI Frequency Setting */ + if (3 <= argc) + drp_max_freq = atoi(argv[2]); + else + drp_max_freq = DRP_MAX_FREQ; + if (4 <= argc) + drpai_freq = atoi(argv[3]); + else + drpai_freq = DRPAI_FREQ; + std::cout<<"\n[INFO] DRP MAX FREQUENCY : "< drpai_fd) + { + std::cerr << "[ERROR] Failed to open DRP-AI Driver : errno=" << errno << std::endl; + std::cout << "[INFO] End Application.\n"; + return -1; + } + /* Set drpai mem start address */ + uint64_t drpaimem_addr_start = 1073741824; + + /*Load Label from label_list file*/ + label_file_map = load_label_file(label_list); + if (label_file_map.empty()) + { + fprintf(stderr,"[ERROR] Failed to load label file: %s\n", label_list.c_str()); + ret = -1; + goto end_main; + } + + /*Load model_dir structure and its weight to runtime object */ + drpaimem_addr_start = init_drpai(drpai_fd); + if ((uint32_t)NULL == drpaimem_addr_start) + { + fprintf(stderr, "[ERROR] Failed to get DRP-AI memory area start address.\n"); + goto end_main; + } + + runtime_status = runtime.LoadModel(model_dir, drpaimem_addr_start + DRPAI_MEM_OFFSET); + + if(!runtime_status) + { + fprintf(stderr, "[ERROR] Failed to load model.\n"); + goto end_main; + } + + /*Get input data */ + input_data_type = runtime.GetInputDataType(0); + if (InOutDataType::FLOAT32 == input_data_type) + { + /*Do nothing*/ + } + else if (InOutDataType::FLOAT16 == input_data_type) + { + fprintf(stderr, "[ERROR] Input data type : FP16.\n"); + /*If your model input data type is FP16, use std::vector for reading input data. 
*/ + goto end_main; + } + else + { + fprintf(stderr, "[ERROR] Input data type : neither FP32 nor FP16.\n"); + goto end_main; + } + + /*Termination Request Semaphore Initialization*/ + /*Initialized value at 1.*/ + sem_create = sem_init(&terminate_req_sem, 0, 1); + if (0 != sem_create) + { + fprintf(stderr, "[ERROR] Failed to Initialize Termination Request Semaphore.\n"); + ret_main = -1; + goto end_threads; + } + + /*Create Inference Thread*/ + create_thread_ai = pthread_create(&ai_inf_thread, NULL, R_Inf_Thread, NULL); + if (0 != create_thread_ai) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create AI Inference Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /* Create Capture Thread */ + create_thread_capture = pthread_create(&capture_thread, NULL, R_Capture_Thread, (void *) &gstreamer_pipeline); + if (0 != create_thread_capture) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create Capture Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /* Create exit Thread */ + create_thread_exit = pthread_create(&exit_thread, NULL, R_exit_Thread, NULL); + if (0 != create_thread_exit) + { + fprintf(stderr, "[ERROR] Failed to create exit Thread.\n"); + ret_main = -1; + goto end_threads; + } + /* Detached exit thread */ + pthread_detach(exit_thread); + + /* Create Key Hit Thread */ + create_thread_key = pthread_create(&kbhit_thread, NULL, R_Kbhit_Thread, NULL); + if (0 != create_thread_key) + { + fprintf(stderr, "[ERROR] Failed to create Key Hit Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /* Main Processing */ + main_proc = R_Main_Process(); + if (0 != main_proc) + { + fprintf(stderr, "[ERROR] Error during Main Process\n"); + ret_main = -1; + } + goto end_threads; + +end_threads: + if(0 == create_thread_capture) + { + ret = wait_join(&capture_thread, CAPTURE_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Capture Thread on time.\n"); + ret_main = -1; + } + } + if 
(0 == create_thread_ai) + { + ret = wait_join(&ai_inf_thread, AI_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit AI Inference Thread on time.\n"); + ret_main = -1; + } + } + if (0 == create_thread_key) + { + ret = wait_join(&kbhit_thread, EXIT_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Key Hit Thread on time.\n"); + ret_main = -1; + } + } + + /* Delete Terminate Request Semaphore */ + if (0 == sem_create) + { + sem_destroy(&terminate_req_sem); + } + /* Exit wayland */ + wayland.exit(); + goto end_main; + +end_main: + printf("[INFO] End Application.\n"); + return ret_main; +} diff --git a/Q01_footfall_counter/src_v2h/sort.cpp b/Q01_footfall_counter/src_v2h/sort.cpp new file mode 100755 index 0000000..85d67d2 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/sort.cpp @@ -0,0 +1,145 @@ +#include "sort.h" + +using namespace sort; + +Sort::Sort(int maxAge, int minHits, float iouThresh) + : maxAge(maxAge), minHits(minHits), iouThresh(iouThresh) +{ + km = std::make_shared(); +} + + +Sort::~Sort() +{ +} + + +cv::Mat Sort::update(const cv::Mat &bboxesDet) +{ + assert(bboxesDet.rows >= 0 && bboxesDet.cols == 6); // detections, [xc, yc, w, h, score, class_id] + + cv::Mat bboxesPred(0, 6, CV_32F, cv::Scalar(0)); // predictions used in data association, [xc, yc, w, h, ...] 
+ cv::Mat bboxesPost(0, 9, CV_32F, cv::Scalar(0)); // bounding boxes estimate, [xc, yc, w, h, score, class_id, vx, vy, tracker_id] + + // kalman bbox tracker predict + for (auto it = trackers.begin(); it != trackers.end();) + { + cv::Mat bboxPred = (*it)->predict(); // Mat(1, 4) + if (isAnyNan(bboxPred)) + trackers.erase(it); // remove the NAN value and corresponding tracker + else{ + cv::hconcat(bboxPred, cv::Mat(1, 2, CV_32F,cv::Scalar(0)), bboxPred); // Mat(1, 6) + cv::vconcat(bboxesPred, bboxPred, bboxesPred); // Mat(N, 6) + ++it; + } + } + + TypeAssociate asTuple = dataAssociate(bboxesDet, bboxesPred); + TypeMatchedPairs matchedDetPred = std::get<0>(asTuple); + TypeLostDets lostDets = std::get<1>(asTuple); + TypeLostPreds lostPreds = std::get<2>(asTuple); + + // update matched trackers with assigned detections + for (auto pair : matchedDetPred) + { + int detInd = pair.first; + int predInd = pair.second; + cv::Mat bboxPost = trackers[predInd]->update(bboxesDet.rowRange(detInd, detInd + 1)); + + if (trackers[predInd]->getHitStreak() >= minHits) + { + float score = bboxesDet.at(detInd, 4); + int classId = bboxesDet.at(detInd, 5); + float dx = trackers[predInd]->getState().at(4, 0); + float dy = trackers[predInd]->getState().at(5, 0); + int trackerId = trackers[predInd]->getFilterId(); + cv::Mat tailData = (cv::Mat_(1, 5) << score, classId, dx, dy, trackerId); + cv::hconcat(bboxPost, tailData, bboxPost); + cv::vconcat(bboxesPost, bboxPost, bboxesPost); // Mat(N, 9) + } + } + + // remove dead trackers + trackers.erase( + std::remove_if(trackers.begin(), trackers.end(), + [&](const KalmanBoxTracker::Ptr& kbt)->bool { + return kbt->getTimeSinceUpdate() > maxAge; + }), + trackers.end() + ); + + // create and initialize new trackers for unmatched detections + for (int lostInd : lostDets) + { + cv::Mat lostBbox = bboxesDet.rowRange(lostInd, lostInd + 1); + trackers.push_back(make_shared(lostBbox)); + } + + return bboxesPost; +} + + +TypeAssociate 
Sort::dataAssociate(const cv::Mat& bboxesDet, const cv::Mat& bboxesPred) +{ + TypeMatchedPairs matchedDetPred; + TypeLostDets lostDets; + TypeLostPreds lostPreds; + + // initialize + for (int i = 0; i < bboxesDet.rows; ++i) + lostDets.push_back(i); // size M + for (int j = 0; j < bboxesPred.rows; ++j) + lostPreds.push_back(j); // size N + + // nothing detected or predicted + if (bboxesDet.rows == 0 || bboxesPred.rows == 0) + return make_tuple(matchedDetPred, lostDets, lostPreds); + + // compute IoU matrix + cv::Mat iouMat = getIouMatrix(bboxesDet, bboxesPred); // Mat(M, N) + + // Kuhn Munkres assignment algorithm + Vec2f costMatrix(iouMat.rows, Vec1f(iouMat.cols, 0.0f)); + for (int i = 0; i < iouMat.rows; ++i) + for (int j = 0; j < iouMat.cols; ++j) + costMatrix[i][j] = 1.0f - iouMat.at(i, j); + auto indices = km->compute(costMatrix); + + // find matched pairs and lost detect and predict + for (auto [detInd, predInd] : indices) { + matchedDetPred.push_back({detInd, predInd}); + lostDets.erase(remove(lostDets.begin(), lostDets.end(), detInd), lostDets.end()); + lostPreds.erase(remove(lostPreds.begin(), lostPreds.end(), predInd), lostPreds.end()); + } + + return make_tuple(matchedDetPred, lostDets, lostPreds); +} + + +cv::Mat Sort::getIouMatrix(const cv::Mat& bboxesA, const cv::Mat& bboxesB) +{ + assert(bboxesA.cols >= 4 && bboxesB.cols >= 4); + int numA = bboxesA.rows; + int numB = bboxesB.rows; + cv::Mat iouMat(numA, numB, CV_32F, cv::Scalar(0.0)); + + cv::Rect re1, re2; + for (int i = 0; i < numA; ++i) + { + for (int j = 0; j < numB; ++j) + { + re1.x = bboxesA.at(i, 0) - bboxesA.at(i, 2) / 2.0; + re1.y = bboxesA.at(i, 1) - bboxesA.at(i, 3) / 2.0; + re1.width = bboxesA.at(i, 2); + re1.height = bboxesA.at(i, 3); + re2.x = bboxesB.at(j, 0) - bboxesB.at(j, 2) / 2.0; + re2.y = bboxesB.at(j, 1) - bboxesB.at(j, 3) / 2.0; + re2.width = bboxesB.at(j, 2); + re2.height = bboxesB.at(j, 3); + + iouMat.at(i, j) = (re1 & re2).area() / ((re1 | re2).area() + FLT_EPSILON); + } + } 
+ + return iouMat; +} \ No newline at end of file diff --git a/Q01_footfall_counter/src_v2h/sort.h b/Q01_footfall_counter/src_v2h/sort.h new file mode 100755 index 0000000..f6e1dd4 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/sort.h @@ -0,0 +1,88 @@ +/** + * @desc: C++ implementation of SORT. + * Bewley Alex "Simple, online, and realtime tracking of multiple objects in a video sequence", + * http://arxiv.org/abs/1602.00763, 2016. + * + * @author: lst + * @date: 12/10/2021 + */ +#pragma once + +#include +#include "kuhn_munkres.h" +#include "kalman_box_tracker.h" + +namespace sort{ + using std::shared_ptr; + using std::vector; + using std::pair; + using std::tuple; + using std::make_tuple; + using std::make_shared; + using kuhn_munkres::KuhnMunkres; + using kuhn_munkres::Vec2f; + using kuhn_munkres::Vec1f; + + using TypeMatchedPairs = vector >; // first: detected id, second: predicted id + using TypeLostDets = vector; + using TypeLostPreds = vector; + using TypeAssociate = tuple; + + class Sort + { + // variables + public: + using Ptr = std::shared_ptr; + private: + int maxAge; // tracker's maximal unmatch count + int minHits; // tracker's minimal match count + float iouThresh; // IoU threshold + vector trackers; + KuhnMunkres::Ptr km = nullptr; + + // methods + public: + Sort(int maxAge=1, int minHits=3, float iouThresh=0.3); + virtual ~Sort(); + Sort(const Sort&) = delete; + Sort& operator=(const Sort&) = delete; + + /** + * @brief bbox tracking in SORT, this method must be called once for each frame even with empty detections, + * the number of objects retured may differ from the number of detections provided. + * @param bboxesDet detections, Mat(M, 6) with the format [[xc,yc,w,h,score,class_id];[...];...] + * @return matched bboxes, Mat(N, 9) with the format [[xc,yc,w,h,score,class_id,dx,dy,tracker_id];[...];...]. 
+ */ + cv::Mat update(const cv::Mat &bboxesDet); + private: + /** + * @brief check if NAN value in Mat + * @param mat input Matrix + * @return any NAN value in Matrix or not. + */ + template + static bool isAnyNan(const cv::Mat& mat) + { + for (auto it = mat.begin<_Tp>(); it != mat.end<_Tp>(); ++it) + if (*it != *it) return true; + return false; + } + + /** + * @brief data associate in SORT + * @param bboxesDet detected bboxes, Mat(M, 4+) + * @param bboxesPred predicted bboxes, Mat(N, 4+) + * @return associate tuple (matched pairs, lost detections, lost predictions) + */ + TypeAssociate dataAssociate(const cv::Mat& bboxesDet, const cv::Mat& bboxesPred); + + /** + * @brief IoU of bboxes + * @param bboxesA input bboxes A, Mat(M, 4+) + * @param bboxesB another input bboxes B, Mat(N, 4+) + * @return M x N matrix, value(i, j) means IoU of A(i) and B(j) + */ + static cv::Mat getIouMatrix(const cv::Mat& bboxesA, const cv::Mat& bboxesB); + }; +} + diff --git a/Q01_footfall_counter/src_v2h/toolchain/runtime.cmake b/Q01_footfall_counter/src_v2h/toolchain/runtime.cmake new file mode 100755 index 0000000..10300ce --- /dev/null +++ b/Q01_footfall_counter/src_v2h/toolchain/runtime.cmake @@ -0,0 +1,14 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR aarch64) +set(MERA_DRP_RUNTIME ON) +set(DCMAKE_SYSTEM_VERSION 1) + +set(CMAKE_SYSROOT $ENV{SDK}/sysroots/aarch64-poky-linux) +set(CMAKE_FIND_ROOT_PATH $ENV{SDK}/sysroots/aarch64-poky-linux/usr/include/gnu) +set(CMAKE_CXX_COMPILER $ENV{SDK}/sysroots/x86_64-pokysdk-linux/usr/bin/aarch64-poky-linux/aarch64-poky-linux-g++) +set(CMAKE_C_COMPILER $ENV{SDK}/sysroots/x86_64-pokysdk-linux/usr/bin/aarch64-poky-linux/aarch64-poky-linux-gcc) + +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) diff --git a/Q01_footfall_counter/src_v2h/utils.cpp b/Q01_footfall_counter/src_v2h/utils.cpp new file mode 100755 
index 0000000..f0cdc3f --- /dev/null +++ b/Q01_footfall_counter/src_v2h/utils.cpp @@ -0,0 +1,189 @@ +#include "utils.h" + +extern bool doubleClick; +/***************************************** + * Function Name : detect_mouse_click + * Description : function to detect mouse click. + * Return value : return 1 if Error opening input device, + * return 0 if mouse click detected. + ******************************************/ +int devices::detect_mouse_click(void) +{ + int32_t c = 0; + /* Gets the full path to the mouse event device. */ + std::string full_path = find_mouse_event(); + /* Converts the full path to a const char pointer. */ + const char* device_path = full_path.c_str(); + /* Opens the mouse event device file. */ + int fd = open(device_path, O_RDONLY); + if (fd < 0) + { + std::cerr << "Error opening input device" << std::endl; + return 1; + } + /* Enters a loop to read mouse events. */ + while (true) + { + struct input_event ev; + /* Reads a mouse event from the device file. */ + ssize_t bytesRead = read(fd, &ev, sizeof(struct input_event)); + /* Checks if the event is a key event and if the key code is for the left or right mouse button. */ + if (bytesRead == sizeof(struct input_event) && ev.type == EV_KEY && + (ev.code == BTN_LEFT || ev.code == BTN_RIGHT)) + { + /* Checks if the key is being pressed (value == 1) or released (value == 0). */ + if (ev.value == 0) + { + if(first_click == true) + { + t2 = std::chrono::high_resolution_clock::now(); + duration = std::chrono::duration_cast(t2 - t1).count(); + first_click = false; + } + if(second_click == true) + { + t4 = std::chrono::high_resolution_clock::now(); + duration_sc = std::chrono::duration_cast(t4 - t3).count(); + second_click = false; + } + /* If the second click has occurred and the duration between clicks is less than 110 milliseconds, + then a double click has been detected. 
*/ + if(duration_cd < tm_clk_difference && duration_cd != 0 && duration < tm_difference + && duration != 0 && duration_sc < tm_difference && duration_sc != 0) + { + std::cout<<"\n[INFO] Double tap !!!!\n"; + duration = 0; + duration_cd = 0; + duration_sc = 0; + doubleClick = true; + break; + } + } + else if (ev.value == 1) + { + t1 = std::chrono::high_resolution_clock::now(); + if(first_click == false && duration < tm_difference && duration != 0) + { + t3 = std::chrono::high_resolution_clock::now(); + duration = 0; + second_click = true; + } + if(second_click == true) + duration_cd = std::chrono::duration_cast(t3 - t2).count(); + first_click = true; + } + } + } + /* Closes the mouse event device file. */ + close(fd); + /* Returns 0 if no double click was detected. */ + return 0; +} +/***************************************** + * Function Name : find_mouse_event + * Description : function to find which mouse event is mapped with mouse button. + * Return value : return fullPath(string) that is mapped with mouse button event. + ***************************detect_mouse_click***************/ +std::string devices::find_mouse_event(void) +{ + /* flags to detect valid path */ + bool valid_path = false; + /* string variable to get valid path */ + std::string fullPath ; + /* Path to the directory containing all input devices. */ + const char* inputDirPath = "/dev/input/"; + /* Opens the input directory.*/ + DIR* dir = opendir(inputDirPath); + if (!dir) + { + std::cerr << "Error opening input directory" << std::endl; + exit(1); + } + /* declare a vector array to store all events mapped in input device*/ + std::vector mouse_events; + /* list input device command */ + FILE* pipe = popen("ls /dev/input/", "r"); + if (!pipe) + std::cerr << "Error executing command." 
<< std::endl; + char buffer[128]; + while (fgets(buffer, sizeof(buffer), pipe) != NULL) + mouse_events.push_back(buffer); + pclose(pipe); + /* check which event mapped with mouse */ + for(size_t i = 0;i < mouse_events.size();i++) + { + + std::string mapped_events = mouse_events[i]; + mapped_events.erase(std::remove_if(mapped_events.begin(),mapped_events.end(),::isspace),mapped_events.end()); + fullPath = inputDirPath + std::string(mapped_events); + int fd = open(fullPath.c_str(), O_RDONLY | O_NONBLOCK); + if (fd < 0) + continue; + /* Checks if the input device has relative axis (e.g., a mouse). */ + if (ioctl(fd, EVIOCGBIT(0, EV_MAX), sizeof(long)*EV_MAX)) + { + unsigned long evBits[EV_MAX]; + ioctl(fd, EVIOCGBIT(0, EV_MAX), evBits); + /* Returns the path to the device if the EV_REL bit is set (i.e., the device has relative axis). */ + if (evBits[EV_REL / BITS_PER_LONG] & (1 << (EV_REL % BITS_PER_LONG))) + { + close(fd); + closedir(dir); + valid_path = true; + break; + } + } + /* Closes the input device file. */ + close(fd); + } + /* return valid mouse event path */ + if(valid_path == true) + return fullPath; + else + { + /* Closes the input directory. */ + closedir(dir); + /* Prints an error message and exits the program if no mouse device is found. */ + std::cerr << "\n[INFO] No mouse device found!!" << std::endl; + std::cout << "[INFO] Application End\n"; + exit(1); + } +} + +/***************************************** + * Function Name : query_device_status + * Description : function to check USB/MIPI device is connected. + * Return value : media_port, media port that device is connected. 
+ ******************************************/ +std::string devices::query_device_status(std::string device_type) +{ + std::string media_port = ""; + /* Linux command to be executed */ + const char* command = "v4l2-ctl --list-devices"; + /* Open a pipe to the command and execute it */ + FILE* pipe = popen(command, "r"); + if (!pipe) + { + std::cerr << "[ERROR] Unable to open the pipe." << std::endl; + return media_port; + } + /* Read the command output line by line */ + char buffer[128]; + size_t found; + while (fgets(buffer, sizeof(buffer), pipe) != nullptr) + { + std::string response = std::string(buffer); + found = response.find(device_type); + if (found != std::string::npos) + { + fgets(buffer, sizeof(buffer), pipe); + media_port = std::string(buffer); + pclose(pipe); + /* return media port*/ + return media_port; + } + } + pclose(pipe); + /* return media port*/ + return media_port; +} \ No newline at end of file diff --git a/Q01_footfall_counter/src_v2h/utils.h b/Q01_footfall_counter/src_v2h/utils.h new file mode 100755 index 0000000..faf0747 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/utils.h @@ -0,0 +1,71 @@ +#pragma once +/* Prevents the header file from being included multiple times. */ +#ifndef MOUSE +#define MOUSE + +/* Includes necessary header files. */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +/* Defines constants that are used in the code. */ +#ifndef BITS_PER_LONG +#define BITS_PER_LONG (sizeof(long) * 8) +#endif + +#ifndef EV_REL +#define EV_REL 0x02 +#endif + +#ifndef EV_MAX +#define EV_MAX 0x1F +#endif + +constexpr int kNumColors = 32; +constexpr int kMaxCoastCycles = 1; +constexpr int kMinHits = 3; +constexpr float kMinConfidence = 0.45; + +/* Declares a class called devices. */ +class devices +{ + public: + /* Initializes variables to track the duration between mouse clicks. 
*/ + std::chrono::high_resolution_clock::time_point t1; + std::chrono::high_resolution_clock::time_point t2; + std::chrono::high_resolution_clock::time_point t3; + std::chrono::high_resolution_clock::time_point t4; + + int duration = 0; + int duration_sc = 0; + int duration_cd = 0; + + /* Mouse button up and down time difference */ + const int tm_difference = 200; + /* Mouse button first click and second click time difference */ + const int tm_clk_difference = 190; + + bool last_click = false; + bool first_click = false; + bool second_click = false; + + /* Detects mouse clicks. */ + int detect_mouse_click(void); + /* Finds the path to the mouse event device. */ + std::string find_mouse_event(void); + /* function to check USB/MIPI device is connected. */ + std::string query_device_status(std::string device_type); +}; + +#endif \ No newline at end of file diff --git a/Q01_footfall_counter/src_v2h/wayland.cpp b/Q01_footfall_counter/src_v2h/wayland.cpp new file mode 100755 index 0000000..4c1ee93 --- /dev/null +++ b/Q01_footfall_counter/src_v2h/wayland.cpp @@ -0,0 +1,494 @@ +/*********************************************************************************************************************** + * Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+ ***********************************************************************************************************************/ +/*********************************************************************************************************************** + * File Name : wayland.cpp + * Version : 0.90 + * Description : RZ/V2H DRP-AI Sample Application for Megvii-Base Detection YOLOX with MIPI/USB Camera + ***********************************************************************************************************************/ + +/***************************************** + * Includes + ******************************************/ +#include "define.h" +#include "wayland.h" +#include +#include +#include +#include +#include +#include +#include +#include + + +struct WaylandGlobals { + struct wl_compositor* compositor; + struct wl_shell* shell; +}; + +/***************************************** + * Function Name : registry_global + * Description : wl_registry_listener callback + * wayland func bind. + * Arguments : data = The third argument of wl_registry_add_listener() is notified. + * regisry = The first argument of wl_registry_add_listener() is notified. + * name = global object ID is notified. + * interface = interface name is notifed. + * version = interface version is notified. 
+ * Return value : - + ******************************************/ +static void registry_global(void *data, + struct wl_registry *registry, uint32_t id, + const char *interface, uint32_t version) +{ + struct WaylandGlobals* globals = (struct WaylandGlobals*)data; + if (strcmp(interface, "wl_compositor") == 0) { + globals->compositor = (struct wl_compositor*)wl_registry_bind(registry, id, &wl_compositor_interface, 1); + } + else if (strcmp(interface, "wl_shell") == 0) { + globals->shell = (struct wl_shell*)wl_registry_bind(registry, id, &wl_shell_interface, 1); + } +} + +/* registry callback for listener */ +static const struct wl_registry_listener registry_listener = { registry_global, NULL }; + +/***************************************** + * Function Name : shell_surface_ping + * Description : wl_shell_surface_listener callback + * compositer check hungup + * Arguments : data = The third argument of wl_shell_surface_add_listener() is notified. + * shell_surface = The first argument of wl_shell_surface_add_listener() is notified. + * serial = Identification ID is notified. 
+ * Return value : - + ******************************************/ +static void shell_surface_ping(void *data, + struct wl_shell_surface *shell_surface, + uint32_t serial) +{ + wl_shell_surface_pong(shell_surface, serial); +} + +static const struct wl_shell_surface_listener shell_surface_listener = +{ + .ping = shell_surface_ping, +}; + +Wayland::Wayland() +{ +} + +Wayland::~Wayland() +{ +} + +/***************************************** + * Function Name : LoadShader + * Description : Return the loaded and compiled shader + * Arguments : type + * shaderSrc + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +GLuint Wayland::LoadShader(GLenum type, const char* shaderSrc) +{ + GLuint shader = glCreateShader(type); + assert(shader); + + glShaderSource(shader, 1, &shaderSrc, NULL); + glCompileShader(shader); + + GLint compiled; + glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled); + assert(compiled); + + return shader; +} + +/***************************************** + * Function Name : initProgramObject + * Description : Initialize the shaders and return the program object + * Arguments : pShader + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +GLuint Wayland::initProgramObject(SShader* pShader) +{ + const char* vshader = R"( + attribute vec4 position; + attribute vec2 texcoord; + varying vec2 texcoordVarying; + void main() { + gl_Position = position; + texcoordVarying = texcoord; + } + )"; + + const char* fshader = R"( + precision mediump float; + uniform sampler2D texture; + varying vec2 texcoordVarying; + void main() { + highp float r = texture2D(texture, texcoordVarying).b; + highp float g = texture2D(texture, texcoordVarying).g; + highp float b = texture2D(texture, texcoordVarying).r; + highp float a = texture2D(texture, texcoordVarying).a; + gl_FragColor = vec4(r,g,b,a); + } + + )"; + + GLuint vertexShader = LoadShader(GL_VERTEX_SHADER, vshader); + GLuint 
fragmentShader = LoadShader(GL_FRAGMENT_SHADER, fshader); + + GLuint programObject = glCreateProgram(); + assert(programObject); + + glAttachShader(programObject, vertexShader); + glAttachShader(programObject, fragmentShader); + + glLinkProgram(programObject); + + GLint linked; + glGetProgramiv(programObject, GL_LINK_STATUS, &linked); + assert(linked); + + glDeleteShader(fragmentShader); + glDeleteShader(vertexShader); + + pShader->unProgram = programObject; + pShader->nAttrPos = glGetAttribLocation(pShader->unProgram, "position"); + pShader->nAttrColor = glGetAttribLocation(pShader->unProgram, "texcoord"); + return programObject; +} + +/***************************************** + * Function Name : initEGLDisplay + * Description : Configure EGL and return necessary resources + * Arguments : nativeDisplay + * nativeWindow + * eglDisplay + * eglSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initEGLDisplay(EGLNativeDisplayType nativeDisplay, EGLNativeWindowType nativeWindow, EGLDisplay* eglDisplay, EGLSurface* eglSurface) +{ +// int8_t ret = 0; + + EGLint number_of_config; + EGLint config_attribs[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, 8, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, + EGL_NONE + }; + + static const EGLint context_attribs[] = { + EGL_CONTEXT_CLIENT_VERSION, 2, + EGL_NONE + }; + + *eglDisplay = eglGetDisplay(nativeDisplay); + if (*eglDisplay == EGL_NO_DISPLAY) + { + return -1; + } + + EGLBoolean initialized = eglInitialize(*eglDisplay, NULL, NULL); + if (initialized != EGL_TRUE) + { + return -1; + } + + EGLConfig configs[1]; + + EGLBoolean config = eglChooseConfig(*eglDisplay, config_attribs, configs, 1, &number_of_config); + if (config != EGL_TRUE) + { + return -1; + } + + EGLContext eglContext = eglCreateContext(*eglDisplay, configs[0], EGL_NO_CONTEXT, context_attribs); + + *eglSurface = 
eglCreateWindowSurface(*eglDisplay, configs[0], nativeWindow, NULL); + if (*eglSurface == EGL_NO_SURFACE) + { + return -1; + } + + EGLBoolean makeCurrent = eglMakeCurrent(*eglDisplay, *eglSurface, *eglSurface, eglContext); + if (makeCurrent != EGL_TRUE) + { + return -1; + } + return 0; +} + + +/***************************************** + * Function Name : initWaylandDisplay + * Description : Connect to the Wayland display and return the display and the surface + * Arguments : wlDisplay + * wlSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initWaylandDisplay(struct wl_display** wlDisplay, struct wl_surface** wlSurface) +{ +// int8_t ret = 0; + struct WaylandGlobals globals = { 0 }; + + *wlDisplay = wl_display_connect(NULL); + if(*wlDisplay == NULL) + { + return -1; + } + + struct wl_registry* registry = wl_display_get_registry(*wlDisplay); + wl_registry_add_listener(registry, ®istry_listener, (void*)&globals); + + wl_display_dispatch(*wlDisplay); + wl_display_roundtrip(*wlDisplay); + if (globals.compositor == NULL || globals.shell == NULL) + { + return -1; + } + + *wlSurface = wl_compositor_create_surface(globals.compositor); + if (*wlSurface == NULL) + { + return -1; + } + + struct wl_shell_surface* shellSurface = wl_shell_get_shell_surface(globals.shell, *wlSurface); + wl_shell_surface_set_toplevel(shellSurface); + return 0; +} + +/***************************************** + * Function Name : initWindow + * Description : Connect Wayland and make EGL + * Arguments : width + * height + * wlDisplay + * eglDisplay + * eglSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initWindow(GLint width, GLint height, struct wl_display** wlDisplay, EGLDisplay* eglDisplay, EGLSurface* eglSurface) +{ + int8_t ret = 0; + struct wl_surface* wlSurface; + ret = initWaylandDisplay(wlDisplay, &wlSurface); + if (ret != 0) + { + return -1; + } + + 
struct wl_egl_window* wlEglWindow = wl_egl_window_create(wlSurface, width, height); + if (wlEglWindow == NULL) + { + return -1; + } + + ret = initEGLDisplay((EGLNativeDisplayType)*wlDisplay, (EGLNativeWindowType)wlEglWindow, eglDisplay, eglSurface); + if (ret != 0) + { + return -1; + } + return 0; +} + +/***************************************** + * Function Name : init + * Description : wayland client init + * create buffer. + * Arguments : idx = index of the display buffer + * w = width + * c = color + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::init(uint32_t idx, uint32_t w, uint32_t h, uint32_t c, bool overlay) +{ + int8_t ret = 0; + img_w = w; + img_h = h; + img_c = c; + img_overlay = overlay; + + // Connect Wayland and make EGL + ret = initWindow(w, h, &display, &eglDisplay, &eglSurface); + if (ret != 0) + { + return -1; + } + + // Initialize the shaders and return the program object + GLuint programObject = initProgramObject(&sShader); + if (programObject == 0) + { + return -1; + } + + // Apply program object + glUseProgram(sShader.unProgram); + glGenTextures(2, textures); + + glEnableVertexAttribArray(sShader.nAttrPos); + glEnableVertexAttribArray(sShader.nAttrColor); + + // enable Alpha Blending + if (img_overlay == true){ + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + } + + glUniform1i(glGetUniformLocation(sShader.unProgram, "texture"), 0); + + return 0; +} + +/***************************************** + * Function Name : exit + * Description : Exit Wayland + * Arguments : - + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::exit() +{ + SShader* pShader = &sShader; + if (pShader) { + glDeleteProgram(pShader->unProgram); + pShader->unProgram = 0; + pShader->nAttrPos = -1; + pShader->nAttrColor = -1; + } + wl_display_disconnect(display); + return 0; +} + + 
+/***************************************** + * Function Name : render + * Description : + * Arguments : pShader + * texID + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::render(SShader* pShader, GLuint texID) +{ + const float vertices[] = { + -1.0f, 1.0f, 0.0f, + -1.0f, -1.0f, 0.0f, + 1.0f, 1.0f, 0.0f, + 1.0f, -1.0f, 0.0f + }; + + const float texcoords[] = { + 0.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 1.0f, 1.0f }; + + + glVertexAttribPointer(pShader->nAttrColor, 2, GL_FLOAT, GL_FALSE, 0, texcoords); + glVertexAttribPointer(pShader->nAttrPos, 3, GL_FLOAT, GL_FALSE, 0, vertices); + + // draw texture + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, texID); + //glUniform1i(uniID, texID); + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + return 0; +} + + +/***************************************** + * Function Name : setupTexture + * Description : Bind Texture + * Arguments : texID + * src_pixels + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::setupTexture(GLuint texID, uint8_t* src_pixels) +{ + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + glBindTexture(GL_TEXTURE_2D, texID); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img_w, img_h, 0, GL_RGBA, GL_UNSIGNED_BYTE, src_pixels); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + return 0; +} + + +/***************************************** + * Function Name : commit + * Description : Commit to update the display image + * Arguments : buf_id = buffer id + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::commit(uint8_t* cam_buffer, uint8_t* ol_buffer) +{ + uint8_t ret = 0; +#ifdef DEBUG_TIME_FLG + 
using namespace std; + chrono::system_clock::time_point start, end; + double time = 0; + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + // setup texture + setupTexture(textures[0], cam_buffer); + if (ol_buffer != NULL && img_overlay == true) { + setupTexture(textures[1], ol_buffer); + } +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Setup Image Time : %lf[ms]\n", time); +#endif // DEBUG_TIME_FLG + + // clear + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + +#ifdef DEBUG_TIME_FLG + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + // render + render(&sShader, textures[0]); + if (ol_buffer != NULL && img_overlay == true) { + render(&sShader, textures[1]); + } +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Specifies Render Time : %lf[ms]\n", time); + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + eglSwapBuffers(eglDisplay, eglSurface); + +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Update Frame Time : %lf[ms]\n", time); +#endif // DEBUG_TIME_FLG + + return ret; +} + diff --git a/Q01_footfall_counter/src_v2h/wayland.h b/Q01_footfall_counter/src_v2h/wayland.h new file mode 100755 index 0000000..df5e34f --- /dev/null +++ b/Q01_footfall_counter/src_v2h/wayland.h @@ -0,0 +1,62 @@ +/*********************************************************************************************************************** +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : wayland.h +* Version : 0.90 +* Description : RZ/V2H DRP-AI Sample Application for Megvii-Base Detection YOLOX with MIPI/USB Camera +***********************************************************************************************************************/ + +#ifndef WAYLAND_H +#define WAYLAND_H + + +#include "define.h" +#include +#include +#include +#include + +class Wayland +{ + /* structure of Shader settings */ + typedef struct _SShader { + GLuint unProgram; + GLint nAttrPos; + GLint nAttrColor; + } SShader; + + public: + Wayland(); + ~Wayland(); + + uint8_t init(uint32_t idx, uint32_t w, uint32_t h, uint32_t c, bool overlay = false); + uint8_t exit(); + uint8_t commit(uint8_t* cam_buffer, uint8_t* ol_buffer); + + struct wl_compositor *compositor = NULL; + struct wl_shm *shm = NULL; + struct wl_shell *shell = NULL; + + private: + uint32_t img_h; + uint32_t img_w; + uint32_t img_c; + bool img_overlay; + + struct wl_display *display = NULL; + struct wl_surface *surface; + struct wl_shell_surface *shell_surface; + struct wl_registry *registry = NULL; + EGLDisplay eglDisplay; + EGLSurface eglSurface; + SShader sShader; + GLuint textures[2]; + + GLuint LoadShader(GLenum type, const char* shaderSrc); + GLuint initProgramObject(SShader* pShader); + uint8_t render(SShader* pShader, GLuint texID); + uint8_t setupTexture(GLuint texID, uint8_t* src_pixels); +}; + +#endif diff --git a/Q08_object_counter/README.md b/Q08_object_counter/README.md new file mode 100755 index 0000000..8c6d7b4 --- /dev/null +++ b/Q08_object_counter/README.md @@ -0,0 +1,471 @@ +# Object Counter Application + +## Application: Overview +The Object Counter Application is a user-friendly and efficient generic software tool that can be used to create 
custom counting applications for any scenario. This application uses the advanced YOLOV3/Tiny YOLOv3 algorithm to identify and count objects in images or videos. + +### Use Cases +The Generic Counter Application is a powerful tool that can be used to count objects in a variety of settings, including: + +- **Animal Counting**: The application can be fine tuned to count the animals only. This application can be used for zoo or farm monitoring, also could be used to prevent the road hazards due to animal interference. The list of animals on which the AI model is trained is available in [animal_class.txt](./exe/animal/animal_class.txt) + +- **Vehicle Counting**: The application can be fine tuned to count the vehicle instances per frame. This application can then be used for traffic monitoring at government/corporate buildings.The list of vehicles on which the AI model is trained is available in [vehicle_class.txt](./exe/vehicle/vehicle_class.txt) + +- **General Counting**: The general counting applications can be used to count any type of object, from people and cars to inventory and products. They are often used in businesses to track customer traffic, inventory levels, and employee productivity. The list of objects on which the AI model is trained is available in [coco_class.txt](./exe/coco/coco_class.txt) + +The other use cases could be: + +- **Manufacturing**: The application can be used to count parts on a production line or to measure the output of a machine. +- **Retail**: The application can be used to count products on a shelf or to track the number of customers in a store. +- **Safety**: The application can be used to count people in a crowd or to monitor the traffic flow in a city. + +### Key Features +Here are some of the key features of the Generic Counter Application: + +- **Automatic Object Detection**: + The application utilizes YOLOv3/Tiny YOLOv3 model for detection, identifying and localizing objects specified within the provided frame. 
+- **Flexible**: + The application can be customized to meet the specific needs of any counting scenario. +- **Customizable Settings**: + Users can adjust the detection and classification parameters by using the config file provided in the repository. + +It has following camera input modes. +- Using MIPI Camera +- Using USB Camera + +Users can select detection target from following list +- Animal +- Vehicle +- General (COCO dataset) + +### Supported Product +- RZ/V2L Evaluation Board Kit (RZ/V2L EVK) +- RZ/V2H Evaluation Board Kit (RZ/V2H EVK) + +### Demo +Following is the demo for RZ/V2L EVK. + + +## Application: Requirements + +### Hardware Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ForEquipmentDetails
RZ/V2LRZ/V2L EVKEvaluation Board Kit for RZ/V2L.
Includes followings. +
    +
  • + MIPI Camera Module(Google Coral Camera)
    + Used as a camera input source. +
  • +
  • MicroUSB to Serial Cable for serial communication.
  • +
+
AC AdapterUSB Power Delivery adapter for the board power supply.
MicroHDMI CableUsed to connect the HDMI Monitor and the board.
+ RZ/V2L EVK has microHDMI port.
RZ/V2HRZ/V2H EVKEvaluation Board Kit for RZ/V2H.
AC AdapterUSB Power Delivery adapter for the board power supply.
+ 100W is required.
HDMI CableUsed to connect the HDMI Monitor and the board.
+ RZ/V2H EVK has HDMI port.
USB CameraUsed as a camera input source.
CommonUSB Cable Type-CConnect AC adapter and the board.
HDMI MonitorUsed to display the graphics of the board.
microSD cardUsed as the filesystem.
+ Must have over 4GB capacity of blank space.
+ Operating Environment: Transcend UHS-I microSD 300S 16GB
Linux PCUsed to build application and setup microSD card.
+ Operating Environment: Ubuntu 20.04
SD card readerUsed for setting up microSD card.
USB HubUsed to connect USB Keyboard and USB Mouse to the board.
USB KeyboardUsed to type strings on the terminal of board.
USB MouseUsed to operate the mouse on the screen of board.
+ +>**Note:** All external devices will be attached to the board and does not require any driver installation (Plug n Play Type) + +Connect the hardware as shown below. + +|RZ/V2L EVK | RZ/V2H EVK | +|:---|:---| +|| | + +>**Note 1:** When using the keyboard connected to RZ/V Evaluation Board, the keyboard layout and language are fixed to English. +**Note 2:** For RZ/V2H EVK, there are USB 2.0 and USB 3.0 ports. +USB camera needs to be connected to appropriate port based on its requirement. + + +## Application: Build Stage + +>**Note:** User can skip to the [next stage (deploy)](#application-deploy-stage) if they do not want to build the application. +All pre-built binaries are provided. + +### Prerequisites +This section expects the user to have completed Step 5 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html) provided by Renesas. + +After completion of the guide, the user is expected of following things. +- AI SDK setup is done. +- Following docker container is running on the host machine. + |Board | Docker container | + |:---|:---| + |RZ/V2L EVK|`rzv2l_ai_sdk_container` | + |RZ/V2H EVK|`rzv2h_ai_sdk_container` | + + >**Note:** Docker environment is required for building the sample application. + + +### Application File Generation +1. On your host machine, copy the repository from the GitHub to the desired location. + 1. It is recommended to copy/clone the repository on the `data` folder, which is mounted on the Docker container. + ```sh + cd /data + git clone https://github.com/renesas-rz/rzv_ai_sdk.git + ``` + >Note: This command will download the whole repository, which include all other applications. + If you have already downloaded the repository of the same version, you may not need to run this command. + +2. Run (or start) the docker container and open the bash terminal on the container. +E.g., for RZ/V2L, use the `rzv2l_ai_sdk_container` as the name of container created from `rzv2l_ai_sdk_image` docker image. 
+ > Note that all the build steps/commands listed below are executed on the docker container bash terminal. + +3. Set your clone directory to the environment variable. + ```sh + export PROJECT_PATH=/drp-ai_tvm/data/rzv_ai_sdk + ``` +3. Go to the application source code directory. + ```sh + cd ${PROJECT_PATH}/Q08_object_counter/ + ``` + |Board | `SRC_DIR` | + |:---|:---| + |RZ/V2L EVK|`src` | + |RZ/V2H EVK|`src_v2h` | + +4. Create and move to the `build` directory. + ```sh + mkdir -p build && cd build + `````` +5. Build the application by following the commands below. + ```sh + cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake .. + make -j$(nproc) + ``` +6. The following application file would be generated in the `${PROJECT_PATH}/Q08_object_counter//build` directory + - object_counter + + +## Application: Deploy Stage +### Prerequisites +This section expects the user to have completed Step 7-1 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html#step7) provided by Renesas. + +After completion of the guide, the user is expected of following things. +- microSD card setup is done. + +### File Configuration +For the ease of deployment all the deployable files and folders are provided in following folders. +|Board | `EXE_DIR` | +|:---|:---| +|RZ/V2L EVK|[exe_v2l](./exe_v2l) | +|RZ/V2H EVK|[exe_v2h](./exe_v2h) | + +Each folder contains following items. 
+|File | Details | +|:---|:---| +|coco/tinyyolov3_onnx | **[RZ/V2L only]** Model object files for Coco Detection | +|coco/yolov3_onnx | **[RZ/V2H only]** Model object files for Coco Detection | +|coco/coco_class.txt | Label list for Coco Detection | +|coco/config.ini | User input model config object | +|animal/animal_onnx | Model object files for Animal Detection | +|animal/animal_class.txt | Label list for Animal Detection | +|animal/config.ini | User input model config object | +|vehicle/vehicle_onnx | Model object files for Vehicle Detection | +|vehicle/vehicle_class.txt | Label list for Vehicle Detection | +|vehicle/config.ini | User input model config object | +|app_conf.ini | User input application config object | +|object_counter | Application file | + + +### Instruction +1. **[For RZ/V2H only]** Run following commands to download the necessary file. +Replace each variable according to your board. + ```sh + cd /data/Q08_object_counter/ + wget / + ``` + | Target | `EXE_PATH` |`SO_FILE` |`URL` | + |:---|:---|:---|:---| + |Animal|[exe_v2h/animal/animal_onnx](./exe_v2h/animal/animal_onnx) |`Q08_object_counter_animal_deploy_tvm_v2h-v210.so` |[Release v3.00](https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v3.00/) | + |Vehicle|[exe_v2h/vehicle/vehicle_onnx](./exe_v2h/vehicle/vehicle_onnx) |`Q08_object_counter_vehicle_deploy_tvm_v2h-v210.so` |[Release v3.00](https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v3.00/) | + |COCO|[exe_v2h/coco/yolov3_onnx](./exe_v2h/coco/yolov3_onnx) |`Q08_object_counter_coco_deploy_tvm_v2h-v210.so` |[Release v3.00](https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v3.00/) | + + - E.g., for Animal counting, use following commands. + ```sh + cd /data/Q08_object_counter/exe_v2h/animal/animal_onnx + wget https://github.com/renesas-rz/rzv_ai_sdk/releases/download/v3.00/Q08_object_counter_animal_deploy_tvm_v2h-v210.so + ``` +2. **[For RZ/V2H only]** Rename the `Q08_object_counter_deploy_*.so` to `deploy.so`. 
+ ```sh + mv <SO_FILE> deploy.so + ``` +3. Copy the following files to the `/home/root/tvm` directory of the rootfs (SD Card) for the board. + |File | Details | + |:---|:---| + |All files in `EXE_DIR` directory | Including `deploy.so` file. | + |`object_counter` application file | File generated according to [Application File Generation](#application-file-generation) | + +4. Check if `libtvm_runtime.so` exists under `/usr/lib64` directory of the rootfs (SD card) on the board. + +5. Folder structure in the rootfs (SD Card) would look like: + ```sh + ├── usr/ + │ └── lib64/ + │ └── libtvm_runtime.so + └── home/ + └── root/ + └── tvm/ + ├── coco/ + │ ├── tinyyolov3_onnx/ #RZ/V2L only + │ │ ├── deploy.json #RZ/V2L only + │ │ ├── deploy.params #RZ/V2L only + │ │ ├── deploy.so #RZ/V2L only + │ │ └── preprocess/ #RZ/V2L only + │ │ + │ ├── yolov3_onnx/ #RZ/V2H only + │ │ ├── deploy.json #RZ/V2H only + │ │ ├── deploy.params #RZ/V2H only + │ │ └── deploy.so #RZ/V2H only + │ │ + │ ├── coco_class.txt + │ └── config.ini + ├── animal/ + │ ├── animal_onnx/ + │ │ ├── deploy.json + │ │ ├── deploy.params + │ │ └── deploy.so + │ │ + │ ├── animal_class.txt + │ └── config.ini + ├── vehicle/ + │ ├── vehicle_onnx/ + │ │ ├── deploy.json + │ │ ├── deploy.params + │ │ └── deploy.so + │ │ + │ ├── vehicle_class.txt + │ └── config.ini + │ + ├── app_conf.ini + └── object_counter + ``` +>**Note:** The directory name could be anything instead of `tvm`. If you copy the whole `EXE_DIR` folder on the board, you are not required to rename it to `tvm`. + +## Application: Run Stage + +### Prerequisites +This section expects the user to have completed Step 7-3 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html#step7-3) provided by Renesas. + +After completion of the guide, the user is expected to have completed the following. +- The board setup is done. +- The board is booted with microSD card, which contains the application file. + +### Instruction +1. 
On Board terminal, go to the `tvm` directory of the rootfs. + ```sh + cd /home/root/tvm/ + ``` + +2. Change the values in `app_conf.ini` as per the requirements. Detailed explanation of the `app_conf.ini` file is given at [below section](#explanation-of-the-app_confini-file). + + ```sh + vi app_conf.ini + ``` + +3. Run the application. + ```sh + ./object_counter <mode> <camera option> + ``` + - mode options + |Value |Description | + |-------|-------------------------------------| + |COCO | Detects coco objects listed | + |animal | Detects animals listed | + |vehicle| Detects automobiles listed | + + >**Note:** The mode will be the section name in app_conf.ini file. + + - camera options + |Value|Description | + |-----|---------------------------------------| + | | MIPI camera as input **[RZ/V2L only]**| + |USB | USB camera as input | + + For example, to run in "animal" mode with a USB camera, write the following command. + ```sh + ./object_counter animal USB + ``` + +4. Following window shows up on HDMI screen. + + |RZ/V2L EVK (COCO) | RZ/V2H EVK (COCO) | + |:---|:---| + || | + + On application window, following information is displayed. + - Camera capture + - AI result + - Processing time + + +5. To terminate the application, switch the application window to the terminal by using `Super(windows key)+Tab` and press ENTER key on the terminal of the board. 
+ + +## Application: Configuration +### AI Model +#### RZ/V2L EVK +- TinyYOLOv3: [Darknet](https://pjreddie.com/darknet/yolo/) +Dataset: [COCO](https://cocodataset.org/#home) +Input size: 1x3x416x416 +Output1 size: 1x13x13x255 +Output2 size: 1x26x26x255 + +#### RZ/V2H EVK +- YOLOv3: [Darknet](https://pjreddie.com/darknet/yolo/) +Dataset: [COCO](https://cocodataset.org/#home) +Input size: 1x3x416x416 +Output1 size: 1x13x13x255 +Output2 size: 1x26x26x255 +Output3 size: 1x52x52x255 + +### Dataset + +| Model | Dataset | Description | +|---|---|---| +| coco | [Dataset Link](https://cocodataset.org/#download) | Dataset used is the same as mentioned in the research paper | +| animal | [Dataset Link](https://huggingface.co/datasets/myyyyw/NTLNP) | Dataset of wildlife in the mixed coniferous broad-leaved forest | +| vehicle | [Site](https://universe.roboflow.com/) | Combined multiple sources for different classes from the given site. Sources used are listed in below table | + +| Class | Dataset for RZ/V2L EVK | Dataset for RZ/V2H EVK | +|---|---|---| +| motorcycle | [Dataset](https://universe.roboflow.com/vehicle-mscoco/vehicles-coco) | [Dataset](https://universe.roboflow.com/vehicle-mscoco/vehicles-coco) | +| bus | [Dataset 1](https://universe.roboflow.com/titu/bus-jm7t3), [Dataset 2](https://universe.roboflow.com/final-year-project-shhpl/bus-detection-2wlyo), [Dataset 3](https://universe.roboflow.com/fyp-object-detection-tc8af/sya-bus) | [Dataset 1](https://universe.roboflow.com/titu/bus-jm7t3), [Dataset 2](https://universe.roboflow.com/final-year-project-shhpl/bus-detection-2wlyo), [Dataset 3](https://universe.roboflow.com/fyp-object-detection-tc8af/sya-bus)| +| car | [Dataset 1](https://universe.roboflow.com/hungdk-t8jb0/nhandienxeoto-udgcp), [Dataset 2](https://universe.roboflow.com/project-fjp7n/car-detection-vwdhg) | [Dataset 1](https://universe.roboflow.com/hungdk-t8jb0/nhandienxeoto-udgcp), [Dataset 
2](https://universe.roboflow.com/project-fjp7n/car-detection-vwdhg) | +| policecar | [Dataset 1](https://universe.roboflow.com/fyp-tc-idn2o/police-cars-sumfm), [Dataset 2](https://universe.roboflow.com/maryam-mahmood-6hoeq/pol-tslhg) | [Dataset 1](https://universe.roboflow.com/fyp-tc-idn2o/police-cars-sumfm), [Dataset 2](https://universe.roboflow.com/maryam-mahmood-6hoeq/pol-tslhg)| +| ambulance | [Dataset 1](https://universe.roboflow.com/ambulance-k0z3x/ambulance-detection-azspv), [Dataset 2](https://universe.roboflow.com/school-87zwx/emegency-vehicle-detection) | [Dataset 1](https://universe.roboflow.com/ambulance-k0z3x/ambulance-detection-azspv), [Dataset 2](https://universe.roboflow.com/school-87zwx/emegency-vehicle-detection)| +| truck | [Dataset 1](https://universe.roboflow.com/project-school-ulsua/truck-detection-g88di), [Dataset 2](https://universe.roboflow.com/kmec/truck-detection-vka5s) |[Dataset 1](https://universe.roboflow.com/project-school-ulsua/truck-detection-g88di), [Dataset 2](https://universe.roboflow.com/kmec/truck-detection-vka5s) | +| bicycle | [Dataset 1](https://universe.roboflow.com/vtc-ywqwf/tt-aio6y), [Dataset 2](https://universe.roboflow.com/north-south-university-faox7/bicycle-bdti6) |[Dataset 1](https://universe.roboflow.com/vtc-ywqwf/tt-aio6y), [Dataset 2](https://universe.roboflow.com/north-south-university-faox7/bicycle-bdti6), [Dataset 3](https://cocodataset.org/#download) | +| bike | [Dataset 1](https://universe.roboflow.com/subham-bhansali-fedah/bike-detection-tzvlj), [Dataset 2](https://universe.roboflow.com/fyp-object-detection-tc8af/sya-bike) | [Dataset 1](https://universe.roboflow.com/subham-bhansali-fedah/bike-detection-tzvlj), [Dataset 2](https://universe.roboflow.com/fyp-object-detection-tc8af/sya-bike)| +| Auto | [Dataset 1](https://universe.roboflow.com/rutviknirma/smart-traffic-management-system), [Dataset 2](https://universe.roboflow.com/graduation-project-rtgrc/tuk-tuk-labelling) | [Dataset 
1](https://universe.roboflow.com/rutviknirma/smart-traffic-management-system), [Dataset 2](https://universe.roboflow.com/graduation-project-rtgrc/tuk-tuk-labelling) | +| LCV | [Dataset 1](https://universe.roboflow.com/project-final-ltl6m/vehicle-detection-inlat), [Dataset 2](https://universe.roboflow.com/pooja-shri-v/lcvs-zqmsu) | [Dataset 1](https://universe.roboflow.com/project-final-ltl6m/vehicle-detection-inlat), [Dataset 2](https://universe.roboflow.com/pooja-shri-v/lcvs-zqmsu) | +| Fire engine | [Dataset 1](https://universe.roboflow.com/grad-project-tjt2u/fire-truck-xumw3), [Dataset 2](https://universe.roboflow.com/pouria-maleki/firetruck) | [Dataset 1](https://universe.roboflow.com/grad-project-tjt2u/fire-truck-xumw3), [Dataset 2](https://universe.roboflow.com/pouria-maleki/firetruck) | + +>Note: Link for motorcycle dataset has additional classes bus, car and truck which are also used for training. + + +### AI inference time +|Board | AI model | AI inference time| +|:---|:---|:---| +|RZ/V2L EVK|Tiny YOLOv3| Approximately 170ms | +|RZ/V2H EVK |YOLOv3 | Approximately 40ms | + +### Processing + +|Processing | RZ/V2L EVK | RZ/V2H EVK | +|:---|:---|:---| +|Pre-processing | Processed by DRP-AI.
| Processed by CPU.
| +|Inference | Processed by DRP-AI and CPU. | Processed by DRP-AI and CPU. | +|Post-processing | Processed by CPU. |Processed by CPU. | + + +### Image buffer size + +|Board | Camera capture buffer size|HDMI output buffer size| +|:---|:---|:---| +|RZ/V2L EVK| VGA (640x480) in YUYV format | FHD (1920x1080) in BGRA format | +|RZ/V2H EVK | VGA (640x480) in YUYV format | FHD (1920x1080) in BGRA format | + +### Explanation of the `app_conf.ini` file + +- The section name can be of your choice, and it will be the mode name. + +- The section should contain three variables - `model_path`, `label_path` & `config_path`. + + - The `model_path` value is the path to the folder containing the compiled model. The folder should also contain the preprocess folder. + + - The `label_path` value is the path to the label list the model supports. + + - The `config_path` value is the path to the model configuration ini file. Detailed explanation of the `config.ini` file is given in the section below. + +### Explanation of the `config.ini` file + +- The [**detect**] section contains three variables - `conf`, `anchors` & `objects`. + + - The `conf` value is the confidence threshold used for object detection. + - The `anchors` are the yolo anchors for the object detection. + - The `objects` represents the classes to be identified and it can be changed to other classes present on the class label list. + +- To modify the configuration settings, edit the values in this file using VI Editor. + +```sh +vi config.ini +``` + +## Reference +- For RZ/V2H EVK, this application supports USB camera only with 640x480 resolution. +FHD resolution is supported by e-CAM22_CURZH camera (MIPI). +Please refer to following URL for how to change camera input to MIPI camera. +[https://renesas-rz.github.io/rzv_ai_sdk/latest/about-applications](https://renesas-rz.github.io/rzv_ai_sdk/latest/about-applications#mipi). 
diff --git a/Q08_object_counter/exe/animal/animal_class.txt b/Q08_object_counter/exe_v2h/animal/animal_class.txt old mode 100644 new mode 100755 similarity index 100% rename from Q08_object_counter/exe/animal/animal_class.txt rename to Q08_object_counter/exe_v2h/animal/animal_class.txt diff --git a/Q08_object_counter/exe_v2h/animal/animal_onnx/deploy.json b/Q08_object_counter/exe_v2h/animal/animal_onnx/deploy.json new file mode 100755 index 0000000..2c13800 --- /dev/null +++ b/Q08_object_counter/exe_v2h/animal/animal_onnx/deploy.json @@ -0,0 +1,76 @@ +{ + "nodes": [ + { + "op": "null", + "name": "input1", + "inputs": [] + }, + { + "op": "tvm_op", + "name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "attrs": { + "flatten_data": "0", + "func_name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "hash": "e050f4a9fc692759", + "num_outputs": "3", + "num_inputs": "1", + "global_symbol": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "Compiler": "mera_drp" + }, + "inputs": [ + [ + 0, + 0, + 0 + ] + ] + } + ], + "arg_nodes": [0], + "heads": [ + [ + 1, + 0, + 0 + ], + [ + 1, + 1, + 0 + ], + [ + 1, + 2, + 0 + ] + ], + "attrs": { + "dltype": [ + "list_str", + [ + "float32", + "float16", + "float16", + "float16" + ] + ], + "device_index": [ + "list_int", + [1, 1, 1, 1] + ], + "storage_id": [ + "list_int", + [0, 1, 2, 3] + ], + "shape": [ + "list_shape", + [ + [1, 3, 416, 416], + [1, 54, 13, 13], + [1, 54, 26, 26], + [1, 54, 52, 52] + ] + ] + }, + "node_row_ptr": [0, 1, 4] +} \ No newline at end of file diff --git a/Q08_object_counter/exe_v2h/animal/animal_onnx/deploy.params b/Q08_object_counter/exe_v2h/animal/animal_onnx/deploy.params new file mode 100755 index 0000000..1011def Binary files /dev/null and b/Q08_object_counter/exe_v2h/animal/animal_onnx/deploy.params differ diff --git a/Q08_object_counter/exe_v2h/animal/config.ini b/Q08_object_counter/exe_v2h/animal/config.ini new file mode 100755 index 
0000000..a42bdd0 --- /dev/null +++ b/Q08_object_counter/exe_v2h/animal/config.ini @@ -0,0 +1,6 @@ +; Configuration File + +[detect] +conf=0.1; +anchors=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326; +objects=bear,boar,bird,cat,cow,deer,dog,fox,horse,rabbit,raccoon,monkey,sheep; diff --git a/Q08_object_counter/exe_v2h/app_conf.ini b/Q08_object_counter/exe_v2h/app_conf.ini new file mode 100755 index 0000000..94cc1a5 --- /dev/null +++ b/Q08_object_counter/exe_v2h/app_conf.ini @@ -0,0 +1,19 @@ +; Configuration File + +[COCO] + +model_path=./coco/yolov3_onnx; +label_path=./coco/coco_class.txt; +config_path=./coco/config.ini; + +[animal] + +model_path=./animal/animal_onnx; +label_path=./animal/animal_class.txt; +config_path=./animal/config.ini; + +[vehicle] + +model_path=./vehicle/vehicle_onnx; +config_path=./vehicle/config.ini; +label_path=./vehicle/vehicle_class.txt; \ No newline at end of file diff --git a/Q08_object_counter/exe/coco/coco_class.txt b/Q08_object_counter/exe_v2h/coco/coco_class.txt old mode 100644 new mode 100755 similarity index 100% rename from Q08_object_counter/exe/coco/coco_class.txt rename to Q08_object_counter/exe_v2h/coco/coco_class.txt diff --git a/Q08_object_counter/exe_v2h/coco/config.ini b/Q08_object_counter/exe_v2h/coco/config.ini new file mode 100755 index 0000000..f520276 --- /dev/null +++ b/Q08_object_counter/exe_v2h/coco/config.ini @@ -0,0 +1,8 @@ +; Configuration File + + +[detect] + +conf=0.5; +anchors=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326; +objects=person,car,backpack,umbrella,fork,kite,tie,tennis racket,person,dog,laptop,mouse,remote,keyboard,cell phone; diff --git a/Q08_object_counter/exe_v2h/coco/yolov3_onnx/deploy.json b/Q08_object_counter/exe_v2h/coco/yolov3_onnx/deploy.json new file mode 100755 index 0000000..62b6854 --- /dev/null +++ b/Q08_object_counter/exe_v2h/coco/yolov3_onnx/deploy.json @@ -0,0 +1,76 @@ +{ + "nodes": [ + { + "op": "null", + "name": "input1", + "inputs": [] + }, + { + 
"op": "tvm_op", + "name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "attrs": { + "flatten_data": "0", + "func_name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "hash": "036f4701453dc3f3", + "num_outputs": "3", + "num_inputs": "1", + "global_symbol": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "Compiler": "mera_drp" + }, + "inputs": [ + [ + 0, + 0, + 0 + ] + ] + } + ], + "arg_nodes": [0], + "heads": [ + [ + 1, + 0, + 0 + ], + [ + 1, + 1, + 0 + ], + [ + 1, + 2, + 0 + ] + ], + "attrs": { + "dltype": [ + "list_str", + [ + "float32", + "float16", + "float16", + "float16" + ] + ], + "device_index": [ + "list_int", + [1, 1, 1, 1] + ], + "storage_id": [ + "list_int", + [0, 1, 2, 3] + ], + "shape": [ + "list_shape", + [ + [1, 3, 416, 416], + [1, 255, 13, 13], + [1, 255, 26, 26], + [1, 255, 52, 52] + ] + ] + }, + "node_row_ptr": [0, 1, 4] +} \ No newline at end of file diff --git a/Q08_object_counter/exe_v2h/coco/yolov3_onnx/deploy.params b/Q08_object_counter/exe_v2h/coco/yolov3_onnx/deploy.params new file mode 100755 index 0000000..1011def Binary files /dev/null and b/Q08_object_counter/exe_v2h/coco/yolov3_onnx/deploy.params differ diff --git a/Q08_object_counter/exe_v2h/object_counter b/Q08_object_counter/exe_v2h/object_counter new file mode 100755 index 0000000..dd39d1f Binary files /dev/null and b/Q08_object_counter/exe_v2h/object_counter differ diff --git a/Q08_object_counter/exe_v2h/vehicle/config.ini b/Q08_object_counter/exe_v2h/vehicle/config.ini new file mode 100755 index 0000000..6a142fa --- /dev/null +++ b/Q08_object_counter/exe_v2h/vehicle/config.ini @@ -0,0 +1,6 @@ +; Configuration File +[detect] + +conf=0.5; +anchors=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326; +objects=bus,car,motorcycle,truck,ambulance,Fire truck,LCV,Policecar,bicycle,automobile; diff --git a/Q08_object_counter/exe/vehicle/vehicle_class.txt b/Q08_object_counter/exe_v2h/vehicle/vehicle_class.txt old mode 
100644 new mode 100755 similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_class.txt rename to Q08_object_counter/exe_v2h/vehicle/vehicle_class.txt diff --git a/Q08_object_counter/exe_v2h/vehicle/vehicle_onnx/deploy.json b/Q08_object_counter/exe_v2h/vehicle/vehicle_onnx/deploy.json new file mode 100755 index 0000000..bbff683 --- /dev/null +++ b/Q08_object_counter/exe_v2h/vehicle/vehicle_onnx/deploy.json @@ -0,0 +1,76 @@ +{ + "nodes": [ + { + "op": "null", + "name": "input1", + "inputs": [] + }, + { + "op": "tvm_op", + "name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "attrs": { + "flatten_data": "0", + "func_name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "hash": "b4b8d690567be805", + "num_outputs": "3", + "num_inputs": "1", + "global_symbol": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "Compiler": "mera_drp" + }, + "inputs": [ + [ + 0, + 0, + 0 + ] + ] + } + ], + "arg_nodes": [0], + "heads": [ + [ + 1, + 0, + 0 + ], + [ + 1, + 1, + 0 + ], + [ + 1, + 2, + 0 + ] + ], + "attrs": { + "dltype": [ + "list_str", + [ + "float32", + "float16", + "float16", + "float16" + ] + ], + "device_index": [ + "list_int", + [1, 1, 1, 1] + ], + "storage_id": [ + "list_int", + [0, 1, 2, 3] + ], + "shape": [ + "list_shape", + [ + [1, 3, 416, 416], + [1, 45, 13, 13], + [1, 45, 26, 26], + [1, 45, 52, 52] + ] + ] + }, + "node_row_ptr": [0, 1, 4] +} \ No newline at end of file diff --git a/Q08_object_counter/exe_v2h/vehicle/vehicle_onnx/deploy.params b/Q08_object_counter/exe_v2h/vehicle/vehicle_onnx/deploy.params new file mode 100755 index 0000000..1011def Binary files /dev/null and b/Q08_object_counter/exe_v2h/vehicle/vehicle_onnx/deploy.params differ diff --git a/Q08_object_counter/exe_v2l/animal/animal_class.txt b/Q08_object_counter/exe_v2l/animal/animal_class.txt new file mode 100644 index 0000000..9980a29 --- /dev/null +++ b/Q08_object_counter/exe_v2l/animal/animal_class.txt @@ -0,0 +1,13 @@ 
+boar +bear +bird +cat +cow +dog +horse +sheep +rabbit +raccoon +monkey +fox +deer diff --git a/Q08_object_counter/exe/animal/animal_onnx/deploy.json b/Q08_object_counter/exe_v2l/animal/animal_onnx/deploy.json similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/deploy.json rename to Q08_object_counter/exe_v2l/animal/animal_onnx/deploy.json diff --git a/Q08_object_counter/exe/animal/animal_onnx/deploy.params b/Q08_object_counter/exe_v2l/animal/animal_onnx/deploy.params similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/deploy.params rename to Q08_object_counter/exe_v2l/animal/animal_onnx/deploy.params diff --git a/Q08_object_counter/exe/animal/animal_onnx/deploy.so b/Q08_object_counter/exe_v2l/animal/animal_onnx/deploy.so similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/deploy.so rename to Q08_object_counter/exe_v2l/animal/animal_onnx/deploy.so diff --git a/Q08_object_counter/exe/animal/animal_onnx/preprocess/aimac_desc.bin b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/aimac_desc.bin similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/aimac_desc.bin rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/aimac_desc.bin diff --git a/Q08_object_counter/exe/animal/animal_onnx/preprocess/drp_desc.bin b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/drp_desc.bin similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/drp_desc.bin rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/drp_desc.bin diff --git a/Q08_object_counter/exe/animal/animal_onnx/preprocess/drp_param.bin b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/drp_param.bin similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/drp_param.bin rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/drp_param.bin diff --git 
a/Q08_object_counter/exe/animal/animal_onnx/preprocess/drp_param_info.txt b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/drp_param_info.txt similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/drp_param_info.txt rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/drp_param_info.txt diff --git a/Q08_object_counter/exe/animal/animal_onnx/preprocess/pp_addrmap_intm.txt b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/pp_addrmap_intm.txt similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/pp_addrmap_intm.txt rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/pp_addrmap_intm.txt diff --git a/Q08_object_counter/exe/animal/animal_onnx/preprocess/pp_drpcfg.mem b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/pp_drpcfg.mem similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/pp_drpcfg.mem rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/pp_drpcfg.mem diff --git a/Q08_object_counter/exe/animal/animal_onnx/preprocess/pp_weight.dat b/Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/pp_weight.dat similarity index 100% rename from Q08_object_counter/exe/animal/animal_onnx/preprocess/pp_weight.dat rename to Q08_object_counter/exe_v2l/animal/animal_onnx/preprocess/pp_weight.dat diff --git a/Q08_object_counter/exe/animal/config.ini b/Q08_object_counter/exe_v2l/animal/config.ini similarity index 100% rename from Q08_object_counter/exe/animal/config.ini rename to Q08_object_counter/exe_v2l/animal/config.ini diff --git a/Q08_object_counter/exe/app_conf.ini b/Q08_object_counter/exe_v2l/app_conf.ini similarity index 100% rename from Q08_object_counter/exe/app_conf.ini rename to Q08_object_counter/exe_v2l/app_conf.ini diff --git a/Q08_object_counter/exe_v2l/coco/coco_class.txt b/Q08_object_counter/exe_v2l/coco/coco_class.txt new file mode 100644 index 0000000..1f42c8e --- /dev/null +++ 
b/Q08_object_counter/exe_v2l/coco/coco_class.txt @@ -0,0 +1,80 @@ +person +bicycle +car +motorcycle +airplane +bus +train +truck +boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +couch +potted plant +bed +dining table +toilet +tv +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush \ No newline at end of file diff --git a/Q08_object_counter/exe/coco/config.ini b/Q08_object_counter/exe_v2l/coco/config.ini similarity index 100% rename from Q08_object_counter/exe/coco/config.ini rename to Q08_object_counter/exe_v2l/coco/config.ini diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/deploy.json b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/deploy.json similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/deploy.json rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/deploy.json diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/deploy.params b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/deploy.params similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/deploy.params rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/deploy.params diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/deploy.so b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/deploy.so similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/deploy.so rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/deploy.so diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/aimac_desc.bin 
b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/aimac_desc.bin similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/aimac_desc.bin rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/aimac_desc.bin diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/drp_desc.bin b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/drp_desc.bin similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/drp_desc.bin rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/drp_desc.bin diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/drp_param.bin b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/drp_param.bin similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/drp_param.bin rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/drp_param.bin diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/drp_param_info.txt b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/drp_param_info.txt similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/drp_param_info.txt rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/drp_param_info.txt diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/pp_addrmap_intm.txt diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/pp_drpcfg.mem b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/pp_drpcfg.mem similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/pp_drpcfg.mem rename to 
Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/pp_drpcfg.mem diff --git a/Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/pp_weight.dat b/Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/pp_weight.dat similarity index 100% rename from Q08_object_counter/exe/coco/tinyyolov3_onnx/preprocess/pp_weight.dat rename to Q08_object_counter/exe_v2l/coco/tinyyolov3_onnx/preprocess/pp_weight.dat diff --git a/Q08_object_counter/exe/object_counter b/Q08_object_counter/exe_v2l/object_counter similarity index 100% rename from Q08_object_counter/exe/object_counter rename to Q08_object_counter/exe_v2l/object_counter diff --git a/Q08_object_counter/exe/vehicle/config.ini b/Q08_object_counter/exe_v2l/vehicle/config.ini similarity index 100% rename from Q08_object_counter/exe/vehicle/config.ini rename to Q08_object_counter/exe_v2l/vehicle/config.ini diff --git a/Q08_object_counter/exe_v2l/vehicle/vehicle_class.txt b/Q08_object_counter/exe_v2l/vehicle/vehicle_class.txt new file mode 100644 index 0000000..151936e --- /dev/null +++ b/Q08_object_counter/exe_v2l/vehicle/vehicle_class.txt @@ -0,0 +1,10 @@ +bus +car +motorcycle +truck +ambulance +Fire truck +LCV +Policecar +bicycle +automobile diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/deploy.json b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/deploy.json similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/deploy.json rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/deploy.json diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/deploy.params b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/deploy.params similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/deploy.params rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/deploy.params diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/deploy.so b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/deploy.so similarity index 100% rename from 
Q08_object_counter/exe/vehicle/vehicle_onnx/deploy.so rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/deploy.so diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/aimac_desc.bin b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/aimac_desc.bin similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/aimac_desc.bin rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/aimac_desc.bin diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/drp_desc.bin b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/drp_desc.bin similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/drp_desc.bin rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/drp_desc.bin diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/drp_param.bin b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/drp_param.bin similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/drp_param.bin rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/drp_param.bin diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/drp_param_info.txt b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/drp_param_info.txt similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/drp_param_info.txt rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/drp_param_info.txt diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/pp_addrmap_intm.txt b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/pp_addrmap_intm.txt similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/pp_addrmap_intm.txt rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/pp_addrmap_intm.txt diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/pp_drpcfg.mem 
b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/pp_drpcfg.mem similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/pp_drpcfg.mem rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/pp_drpcfg.mem diff --git a/Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/pp_weight.dat b/Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/pp_weight.dat similarity index 100% rename from Q08_object_counter/exe/vehicle/vehicle_onnx/preprocess/pp_weight.dat rename to Q08_object_counter/exe_v2l/vehicle/vehicle_onnx/preprocess/pp_weight.dat diff --git a/Q08_object_counter/images/COCO_v2h.png b/Q08_object_counter/images/COCO_v2h.png new file mode 100755 index 0000000..e68ae66 Binary files /dev/null and b/Q08_object_counter/images/COCO_v2h.png differ diff --git a/Q08_object_counter/images/Q08_animal.png b/Q08_object_counter/images/Q08_animal.png new file mode 100755 index 0000000..98d6bbd Binary files /dev/null and b/Q08_object_counter/images/Q08_animal.png differ diff --git a/Q08_object_counter/images/Q08_coco.png b/Q08_object_counter/images/Q08_coco.png new file mode 100755 index 0000000..e0ad8d3 Binary files /dev/null and b/Q08_object_counter/images/Q08_coco.png differ diff --git a/Q08_object_counter/images/Q08_vehicle.png b/Q08_object_counter/images/Q08_vehicle.png new file mode 100755 index 0000000..bcf0360 Binary files /dev/null and b/Q08_object_counter/images/Q08_vehicle.png differ diff --git a/Q08_object_counter/images/buffer_flow.png b/Q08_object_counter/images/buffer_flow.png new file mode 100755 index 0000000..a773c2a Binary files /dev/null and b/Q08_object_counter/images/buffer_flow.png differ diff --git a/Q08_object_counter/images/hw_conf_v2h.png b/Q08_object_counter/images/hw_conf_v2h.png new file mode 100755 index 0000000..7b3d546 Binary files /dev/null and b/Q08_object_counter/images/hw_conf_v2h.png differ diff --git a/Q08_object_counter/images/hw_conf_v2l.png 
b/Q08_object_counter/images/hw_conf_v2l.png new file mode 100755 index 0000000..0005ea3 Binary files /dev/null and b/Q08_object_counter/images/hw_conf_v2l.png differ diff --git a/Q08_object_counter/readme.md b/Q08_object_counter/readme.md deleted file mode 100644 index e3d95ff..0000000 --- a/Q08_object_counter/readme.md +++ /dev/null @@ -1,325 +0,0 @@ - -# Object Counter Application - -## Application: Overview -The Object Counter Application is a user-friendly and efficient generic software tool that can be used to create custom counting applications for any scenario. This application uses the advanced Tiny-YOLOv3 algorithm to identify and count objects in images or videos. - -### Use Cases -The Generic Counter Application is a powerful tool that can be used to count objects in a variety of settings, including: - -- **Animal Counting**: The application can be fine tuned to count the animals only. This application can be used for zoo or farm monitoring, also could be used to prevent the road hazards due to animal interference. The list of animals on which the AI model is trained is available in [animal_class.txt](./exe/animal/animal_class.txt) - -- **Vehicle Counting**: The application can be fine tuned to count the vehicle instances per frame. This application can then be used for traffic monitoring at government/corporate buildings.The list of vehicles on which the AI model is trained is available in [vehicle_class.txt](./exe/vehicle/vehicle_class.txt) - -- **General Counting**: The general counting applications can be used to count any type of object, from people and cars to inventory and products. They are often used in businesses to track customer traffic, inventory levels, and employee productivity. 
The list of objects on which the AI model is trained is available in [coco_class.txt](./exe/coco/coco_class.txt) - -The other use cases could be: - -- **Manufacturing**: The application can be used to count parts on a production line or to measure the output of a machine. -- **Retail**: The application can be used to count products on a shelf or to track the number of customers in a store. -- **Safety**: The application can be used to count people in a crowd or to monitor the traffic flow in a city. - -### Key Features -Here are some of the key features of the Generic Counter Application: - -- **Automatic Object Detection**: - The application utilizes Tiny-yolov3 model for detection, identifying and localizing objects specified within the provided frame. -- **Flexible**: - The application can be customized to meet the specific needs of any counting scenario. -- **Customizable Settings**: - Users can adjust the detection and classification parameters by using the config file provided in the repository. - -It has following camera input modes. -- Using MIPI Camera -- Using USB Camera - -Users can select detection target from following list -- Animal -- Vehicle -- General (COCO dataset) - -### Demo - - -## Applications: Requirements - -### Hardware Requirements - -- RZ/V2L Evaluation Board Kit - - MIPI Camera -- USB camera -- USB Keyboard -- USB Mouse -- USB Hub -- HDMI monitor with resolution 1280x720 -- micro HDMI to HDMI cable -- SD Card (for file system) - -[Hardware Setup Steps](https://github.com/renesas-rz/rzv_ai_sdk/#hardware-requirements-and-setup) - -### Software Requirements - -- Ubuntu 20.04 -- OpenCV 4.x -- C++11 or higher - -## Application: Build Stage - ->**Note:** User can skip to the next stage [deploy](#application-deploy-stage) if they don't want to build the application. All pre-built binaries are provided. 
- -**Note:** This project expects the user to have completed [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started) provided by Renesas - -After completion of the guide, the user is expected of following things. -- The Board Set Up and booted. -- SD Card Prepared -- The docker image amd container for `rzv2l_ai_sdk_image` running on host machine. - ->**Note:** Docker container is required for building the sample application. By default the Renesas will provide the container named as `rzv2l_ai_sdk_container`. Please use the docker container name as assigned by the user when building the container. - -### Application: File Generation -1. Copy the repository from the GitHub to the desired location. - - i. It is recommended to copy/clone the repository on the `data` folder which is mounted on the `rzv2l_ai_sdk_container` docker container. - ```sh - cd - git clone https://github.com/renesas-rz/rzv_ai_sdk.git - ``` - > Note 1: Please verify the git repository url if error occurs - - > Note 2: This command will download whole repository, which include all other applications, if you have already downloaded the repository of the same version, you may not need to run this command. - -2. Run(or start) the docker container and open the bash terminal on the container. - - > Note: All the build steps/commands listed below are executed on the docker container bash terminal. - -3. Assign path to the `data` directory mounted on the `rzv2l_ai_sdk_container` docker container. - - ```sh - export PROJECT_PATH=/drp-ai_tvm/data/ - ``` - -4. Go to the `src` directory of the application - - ```sh - cd ${PROJECT_PATH}/rzv_ai_sdk/Q08_object_counter/src/ - ``` - -5. Build the application on docker environment by following the steps below - - ```sh - mkdir -p build && cd build - ``` - ```sh - cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake .. 
- ``` - ```sh - make -j$(nproc) - ``` - The following application file would be generated in the `src/build` directory - - object_counter - - -## Application: Deploy Stage - -For the ease of deployment all the deployable files and folders for RZ/V2L are provided on the [exe](./exe) folder. - -|File | Details | -|:---|:---| -|coco/tinyyolov3_onnx | Model object files for Coco Detection | -|coco/coco_class.txt | Label list for Coco Detection | -|coco/config.ini | User input model config object | -|animal/animal_onnx | Model object files for Animal Detection | -|animal/animal_class.txt | Label list for Animal Detection | -|animal/config.ini | User input model config object | -|vehicle/vehicle_onnx | Model object files for Vehicle Detection | -|vehicle/vehicle_class.txt | Label list for Vehicle Detection | -|vehicle/config.ini | User input model config object | -|app_conf.ini | User input application config object | -|object_counter | Application file | - -Follow the steps mentioned below to deploy the project on RZ/V2L Evaluation Board Kit. -1. Copy following files to the `/home/root/tvm` directory of the rootfs (on SD Card) for RZ/V2L Evaluation Board Kit. - 1. The files present in [exe](./exe) directory, which are listed in the table above. - 2. The generated `object_counter` application file if the application file is built at [build stage](#application-build-stage) -2. Check if libtvm_runtime.so is there on `/usr/lib64` directory of the rootfs (SD card) RZ/V2L Evaluation Board Kit. 
- -Folder structure in the rootfs (SD Card) would look like: -```sh -├── usr/ -│ └── lib64/ -│ └── libtvm_runtime.so -└── home/ - └── root/ - └── tvm/ - ├── coco/ - │ ├── tinyyolov3_onnx/ - │ │ ├── deploy.json - │ │ ├── deploy.params - │ │ ├── deploy.so - │ │ └── preprocess/ - │ │ - │ ├── coco_class.txt - │ └── config.ini - ├── animal/ - │ ├── animal_onnx/ - │ │ ├── deploy.json - │ │ ├── deploy.params - │ │ ├── deploy.so - │ │ └── preprocess/ - │ │ - │ ├── animal_class.txt - │ └── config.ini - ├── vehicle/ - │ ├── vehicle_onnx/ - │ │ ├── deploy.json - │ │ ├── deploy.params - │ │ ├── deploy.so - │ │ └── preprocess/ - │ │ - │ ├── vehicle_class.txt - │ └── config.ini - │ - ├── app_conf.ini - └── object_counter - -``` - - ->**Note:** The directory name could be anything instead of `tvm`. If you copy the whole `exe` folder on the board. You are not required to rename it `tvm`. - -## Application: Run Stage - -For running the application, run the commands as shown below on the RZ/V2L Evaluation Board console. - -1. Go to the `/home/root/tvm` directory of the rootfs - - ```sh - cd /home/root/tvm - ``` - -2. Change the values in `app_conf.ini` as per the requirements. Detailed explanation of the `app_conf.ini` file is given at below section. - - ```sh - vi app_conf.ini - ``` - -3. Run the application in the terminal of the RZ/V2L Evaluation Board Kit using the command - ```sh - ./object_counter - ``` - - mode options - |Value |Description | - |-------|-------------------------------------| - |COCO | Detects coco objects listed | - |animal | Detects animals listed | - |vehicle| Detects automobiles listed | - - >**Note:** The mode will be the section name in app_conf.ini file. - - camera options - |Value|Description | - |-----|---------------------------------------| - | | Default option is MIPI camera | - |USB | USB option takes USB camera as input | - - For example, to run in "animal" mode with a USB camera, write the following command. 
- ```sh - ./object_counter animal USB - ``` - The expected output will be the same as shown in the demo video - -4. Following window shows up on HDMI screen. - - - Coco Object counting - - - >**Note:** In COCO mode, the default setting allows only limited types of detection. If you want to increase the number of detection targets, edit [exe/coco/config.ini](./exe/coco/config.ini). For details on how to write `config.ini` file, please refer to the [Explanation of the config.ini file](#explanation-of-the-configini-file) section. - - - Animal Counting - - - - - Vehicle Counting - - - - On application window, following information is displayed. - - AI Infernce time is shown on top right - - Frame Per Sec (FPS) is shown below this. - - Total detected object counts are shown , alongside the counts for each user-defined classes - -5. To terminate the application, Switch from the application window to the terminal with using `Super(windows key)+Tab` and press `ENTER` key on the terminal of RZ/V2L Evaluation Board Kit. - -## Application: Specifications - -### AI Model Details - -Tiny Yolov3 is used for better performance. Model weights are taken from [Darknet-Yolo](https://pjreddie.com/darknet/yolo/). - -Then the model is retrained with below mentioned dataset. - -### Dataset - -| Model | Dataset | Description | -|---|---|---| -| coco | [Dataset Link](https://cocodataset.org/#download) | Dataset used is the same as mentioned in the research paper | -| animal | [Dataset Link](https://huggingface.co/datasets/myyyyw/NTLNP) | Dataset of wildlife in the mixed coniferous broad-leaved forest | -| vehicle | [Site](https://universe.roboflow.com/) | Combined multiple sources for different classes from the given site. 
Sources used are listed in below table | - -| Class | Dataset | -|---|---| -| motorcycle | [Dataset](https://universe.roboflow.com/vehicle-mscoco/vehicles-coco) | -| bus | [Dataset 1](https://universe.roboflow.com/titu/bus-jm7t3), [Dataset 2](https://universe.roboflow.com/final-year-project-shhpl/bus-detection-2wlyo), [Dataset 3](https://universe.roboflow.com/fyp-object-detection-tc8af/sya-bus) | -| car | [Dataset 1](https://universe.roboflow.com/hungdk-t8jb0/nhandienxeoto-udgcp), [Dataset 2](https://universe.roboflow.com/project-fjp7n/car-detection-vwdhg) | -| policecar | [Dataset 1](https://universe.roboflow.com/fyp-tc-idn2o/police-cars-sumfm), [Dataset 2](https://universe.roboflow.com/maryam-mahmood-6hoeq/pol-tslhg) | -| ambulance | [Dataset 1](https://universe.roboflow.com/ambulance-k0z3x/ambulance-detection-azspv), [Dataset 2](https://universe.roboflow.com/school-87zwx/emegency-vehicle-detection) | -| truck | [Dataset 1](https://universe.roboflow.com/project-school-ulsua/truck-detection-g88di), [Dataset 2](https://universe.roboflow.com/kmec/truck-detection-vka5s) | -| bicycle | [Dataset 1](https://universe.roboflow.com/vtc-ywqwf/tt-aio6y), [Dataset 2](https://universe.roboflow.com/north-south-university-faox7/bicycle-bdti6) | -| bike | [Dataset 1](https://universe.roboflow.com/subham-bhansali-fedah/bike-detection-tzvlj), [Dataset 2](https://universe.roboflow.com/fyp-object-detection-tc8af/sya-bike) | -| Auto | [Dataset 1](https://universe.roboflow.com/rutviknirma/smart-traffic-management-system), [Dataset 2](https://universe.roboflow.com/graduation-project-rtgrc/tuk-tuk-labelling) | -| LCV | [Dataset 1](https://universe.roboflow.com/project-final-ltl6m/vehicle-detection-inlat), [Dataset 2](https://universe.roboflow.com/pooja-shri-v/lcvs-zqmsu) | -| Fire engine | [Dataset 1](https://universe.roboflow.com/grad-project-tjt2u/fire-truck-xumw3), [Dataset 2](https://universe.roboflow.com/pouria-maleki/firetruck) | - ->Note: Link for motorcycle dateset has additional 
classes bus, car and truck which is also used for training - - -### AI inference time - -The AI inference time is 150-190 msec. - -### AI Accuracy - -Mean Average Precision (mAP) for each model given in below table - -| Model | mAP | -|---------|------| -| coco | 33.1 | -| animal | 67 | -| vehicle | 70.9 | - -## Application: Configuration - -### Explanation of the `app_conf.ini` file - -- The section name can be of your choice. And it will be the mode name. - -- The section should contains three variables - `model_path`, `label_path` & `config_path`. - -- The `model_path` value is the path to the folder containing compiled model. The folder should also contains also contain preprocess folder. - -- The `label_path` value is the path to the label list the model supports. - -- The `config_path` value is the path to the model configuration ini file. Detailed explanation of the `config.ini` file is given at below section. - -### Explanation of the `config.ini` file - -- The [**detect**] section contains three variables - 'conf', 'anchors' & 'objects'. - -- The `conf` value is the confidence threshold used for object detection, -- The `anchors` are the yolo anchors for the object detection. -- The `objects` represents class to be identified and it can be changed to other classes present on the class label list. - -- To modify the configuration settings, edit the values in this file using VI Editor, from the RZ/V2L Evaluation Board Kit. 
- -```sh -vi config.ini -``` diff --git a/Q08_object_counter/src_v2h/CMakeLists.txt b/Q08_object_counter/src_v2h/CMakeLists.txt new file mode 100755 index 0000000..429c0bf --- /dev/null +++ b/Q08_object_counter/src_v2h/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required(VERSION 3.12) +project(object_counter_cpp) + +set(CMAKE_CXX_STANDARD 17) + +set(TVM_ROOT $ENV{TVM_HOME}) +include_directories(${TVM_ROOT}/include) +include_directories(${TVM_ROOT}/3rdparty/dlpack/include) +include_directories(${TVM_ROOT}/3rdparty/dmlc-core/include) +include_directories(${TVM_ROOT}/3rdparty/compiler-rt) + +set(TVM_RUNTIME_LIB ${TVM_ROOT}/build_runtime/libtvm_runtime.so) +set(EXE_NAME object_counter) + +file(GLOB SOURCE *.cpp *.h) +add_executable (${EXE_NAME} +${SOURCE} +) +TARGET_LINK_LIBRARIES(${EXE_NAME} pthread) +TARGET_LINK_LIBRARIES(${EXE_NAME} jpeg) +target_link_libraries(${EXE_NAME} + pthread glib-2.0 mmngr gobject-2.0 gstreamer-1.0 gstbase-1.0 gstapp-1.0 + opencv_imgcodecs opencv_imgproc opencv_core opencv_highgui + jpeg webp tiff z tbb gtk-3 png16 gdk-3 cairo + lzma rt cairo-gobject + xkbcommon wayland-cursor wayland-egl wayland-client epoxy + fribidi harfbuzz fontconfig + glib-2.0 gobject-2.0 gdk_pixbuf-2.0 gmodule-2.0 pangocairo-1.0 + atk-1.0 gio-2.0 pango-1.0 freetype pixman-1 uuid pcre + mount resolv expat pangoft2-1.0 blkid + EGL GLESv2 mmngrbuf) +find_package(OpenCV REQUIRED) +if(OpenCV_FOUND) + target_include_directories(${EXE_NAME} PUBLIC ${OpenCV_INCLUDE_DIRS}) + target_link_libraries(${EXE_NAME} ${OpenCV_LIBS}) +endif() +target_link_libraries(${EXE_NAME} ${TVM_RUNTIME_LIB}) +target_compile_definitions(${EXE_NAME} PRIVATE V2H) diff --git a/Q08_object_counter/src_v2h/MeraDrpRuntimeWrapper.cpp b/Q08_object_counter/src_v2h/MeraDrpRuntimeWrapper.cpp new file mode 100755 index 0000000..ff005a8 --- /dev/null +++ b/Q08_object_counter/src_v2h/MeraDrpRuntimeWrapper.cpp @@ -0,0 +1,208 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 
2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +#include +#include +#include +#include + +#include +#include +#include +#include "MeraDrpRuntimeWrapper.h" + +template +static std::vector LoadBinary(const std::string& bin_file) { + std::ifstream file(bin_file.c_str(), std::ios::in | std::ios::binary); + if (!file.is_open()) { + LOG(FATAL) << "unable to open file " + bin_file; + } + + file.seekg(0, file.end); + const uint32_t file_size = static_cast(file.tellg()); + file.seekg(0, file.beg); + + const auto file_buffer = std::unique_ptr(new char[file_size]); + file.read(file_buffer.get(), file_size); + + if (file.bad() || file.fail()) { + LOG(FATAL) << "error occured while reading the file"; + } + + file.close(); + + auto ptr = reinterpret_cast(file_buffer.get()); + const auto num_elements = file_size / sizeof(T); + return std::vector(ptr, ptr + num_elements); +} + +MeraDrpRuntimeWrapper::MeraDrpRuntimeWrapper() { + //device_type = kDLCPU; + device_type = kDLDrpAi; + device_id = 0; +}; + +MeraDrpRuntimeWrapper::~MeraDrpRuntimeWrapper() = default; + +bool MeraDrpRuntimeWrapper::LoadModel(const std::string& model_dir, uint32_t 
start_address){ + device_type = kDLCPU; + + return LoadModel(model_dir, (uint64_t)start_address); +} + +bool MeraDrpRuntimeWrapper::LoadModel(const std::string& model_dir, uint64_t start_address = 0x00) { + LOG(INFO) << "Loading json data..."; + const std::string json_file(model_dir + "/deploy.json"); + std::ifstream json_in(json_file.c_str(), std::ios::in); + std::string json_data((std::istreambuf_iterator(json_in)), std::istreambuf_iterator()); + json_in.close(); + + #if 0 + if(json_data.find("drp") == json_data.npos && device_type != kDLCPU){ + LOG(INFO) <<"Break! this model is Not for DRP-AI retry as CPU Only"; + return false; + } + #else + if(json_data.find("drp") == json_data.npos && device_type != kDLCPU){ + LOG(INFO) <<"try as CPU Only"; + device_type = kDLCPU; + } + #endif + + LOG(INFO) << "Loading runtime module..."; + tvm::runtime::Module mod_syslib = tvm::runtime::Module::LoadFromFile(model_dir + "/deploy.so"); + mod = (*tvm::runtime::Registry::Get("tvm.graph_executor_debug.create"))( + json_data, mod_syslib, device_type, device_id); + + LOG(INFO) << "Loading parameters..."; + tvm::runtime::PackedFunc load_params = mod.GetFunction("load_params"); + auto params_data = LoadBinary(model_dir + "/deploy.params"); + TVMByteArray params_arr; + params_arr.data = params_data.data(); + params_arr.size = params_data.size(); + load_params(params_arr); + tvm::runtime::PackedFunc set_start_address = mod.GetFunction("set_start_address"); + if(set_start_address != nullptr){ + set_start_address(start_address); + } + return true; +} + +template +void MeraDrpRuntimeWrapper::SetInput(int input_index, const T* data_ptr) { + LOG(INFO) << "Loading input..."; + + tvm::runtime::PackedFunc get_input = mod.GetFunction("get_input"); + tvm::runtime::NDArray xx = get_input(input_index); + auto in_shape = xx.Shape(); + int64_t in_size = 1; + for (unsigned long i = 0; i < in_shape.size(); ++i) { + in_size *= in_shape[i]; + } + + DLDevice ctx; + ctx.device_id = device_id; + 
ctx.device_type = DLDeviceType(device_type); + + auto input_array = tvm::runtime::NDArray::Empty(in_shape, xx.DataType(), ctx); + auto input_data = (T*)(input_array->data); + std::memcpy(input_data, data_ptr, sizeof(T) * in_size); + tvm::runtime::PackedFunc set_input = mod.GetFunction("set_input"); + set_input(input_index, input_array); +} +template void MeraDrpRuntimeWrapper::SetInput(int input_index, const float*); +template void MeraDrpRuntimeWrapper::SetInput(int input_index, const unsigned short*); + +void MeraDrpRuntimeWrapper::Run() { + mod.GetFunction("run")(); +} + +void MeraDrpRuntimeWrapper::ProfileRun(const std::string& profile_table, const std::string& profile_csv) { + tvm::runtime::PackedFunc profile = mod.GetFunction("profile"); + tvm::runtime::Array collectors; + tvm::runtime::profiling::Report report = profile(collectors); + + std::string rep_table = report->AsTable(); + std::ofstream ofs_table (profile_table, std::ofstream::out); + ofs_table << rep_table << std::endl; + ofs_table.close(); + + std::string rep_csv = report->AsCSV(); + std::ofstream ofs_csv (profile_csv, std::ofstream::out); + ofs_csv << rep_csv << std::endl; + ofs_csv.close(); +} + +int MeraDrpRuntimeWrapper::GetNumInput(std::string model_dir) { + // TVM does not support api to get number input of model. 
+ // This function calculate input number base on convention + // of input data name (input_xyz.bin) + DIR *dir; + dirent *diread; + int num_input = 0; + if ((dir = opendir(model_dir.c_str())) != nullptr) { + while ((diread = readdir(dir)) != nullptr) { + std::string file_name(diread->d_name); + if (std::regex_match(file_name, std::regex("(input_)(.*)(bin)") )) { + num_input++; + } + } + closedir(dir); + } else { + LOG(FATAL) << "Can not open model dir : " << model_dir; + } + + return num_input; +} + +InOutDataType MeraDrpRuntimeWrapper::GetInputDataType(int index) { + tvm::runtime::PackedFunc get_input = mod.GetFunction("get_input"); + tvm::runtime::NDArray input_info = get_input(index); + InOutDataType data_type = InOutDataType::OTHER; + if (input_info.DataType().is_float() && input_info.DataType().bits() == 32) { + data_type = InOutDataType::FLOAT32; + } else if (input_info.DataType().is_float() && input_info.DataType().bits() == 16) { + data_type = InOutDataType::FLOAT16; + } + return data_type; + } + +int MeraDrpRuntimeWrapper::GetNumOutput() { + return mod.GetFunction("get_num_outputs")(); + } + +std::tuple MeraDrpRuntimeWrapper::GetOutput(int index) { + tvm::runtime::PackedFunc get_output = mod.GetFunction("get_output"); + tvm::runtime::NDArray out = get_output(index); + int64_t out_size = 1; + for ( unsigned long i = 0; i < out.Shape().size(); ++i) { + out_size *= out.Shape()[i]; + } + + InOutDataType data_type = InOutDataType::OTHER; + if (out.DataType().is_float() && out.DataType().bits() == 32) { + data_type = InOutDataType::FLOAT32; + } else if (out.DataType().is_float() && out.DataType().bits() == 16) { + data_type = InOutDataType::FLOAT16; + } + return std::make_tuple(data_type, reinterpret_cast(out->data), out_size); +} diff --git a/Q08_object_counter/src_v2h/MeraDrpRuntimeWrapper.h b/Q08_object_counter/src_v2h/MeraDrpRuntimeWrapper.h new file mode 100755 index 0000000..5f1b2be --- /dev/null +++ b/Q08_object_counter/src_v2h/MeraDrpRuntimeWrapper.h @@ 
-0,0 +1,52 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * +*/ +#include + +enum class InOutDataType { + FLOAT32, + FLOAT16, + OTHER +}; + +class MeraDrpRuntimeWrapper { + public: + MeraDrpRuntimeWrapper(); + ~MeraDrpRuntimeWrapper(); + + bool LoadModel(const std::string& model_dir, uint32_t start_address); + bool LoadModel(const std::string& model_dir, uint64_t start_address); + template + void SetInput(int input_index, const T* data_ptr); + void Run(); + void ProfileRun(const std::string& profile_table, const std::string& profile_csv); + int GetNumInput(std::string model_dir); + InOutDataType GetInputDataType(int index); + int GetNumOutput(); + + std::tuple GetOutput(int index); + + private: + int device_type; + int device_id; + tvm::runtime::Module mod; +}; diff --git a/Q08_object_counter/src_v2h/box.cpp b/Q08_object_counter/src_v2h/box.cpp new file mode 100755 index 0000000..1d8eeb9 --- /dev/null +++ b/Q08_object_counter/src_v2h/box.cpp @@ -0,0 +1,140 @@ +/*********************************************************************************************************************** +* 
DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : box.cpp +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +#include "box.h" + +/***************************************** +* Function Name : overlap +* Description : Function to compute the overlapped data between coordinate x with size w +* Arguments : x1 = 1-dimensional coordinate of first line +* w1 = size of fist line +* x2 = 1-dimensional coordinate of second line +* w2 = size of second line +* Return value : overlapped line size +******************************************/ +float overlap(float x1, float w1, float x2, float w2) +{ + float l1 = x1 - w1/2; + float l2 = x2 - w2/2; + float left = l1 > l2 ? l1 : l2; + float r1 = x1 + w1/2; + float r2 = x2 + w2/2; + float right = r1 < r2 ? 
r1 : r2; + return right - left; +} + +/***************************************** +* Function Name : box_intersection +* Description : Function to compute the area of intersection of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : area of intersection +******************************************/ +float box_intersection(Box a, Box b) +{ + float w = overlap(a.x, a.w, b.x, b.w); + float h = overlap(a.y, a.h, b.y, b.h); + if(w < 0 || h < 0) + { + return 0; + } + float area = w*h; + return area; +} + +/***************************************** +* Function Name : box_union +* Description : Function to compute the area of union of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : area of union +******************************************/ +float box_union(Box a, Box b) +{ + float i = box_intersection(a, b); + float u = a.w*a.h + b.w*b.h - i; + return u; +} + +/***************************************** +* Function Name : box_iou +* Description : Function to compute the Intersection over Union (IoU) of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : IoU +******************************************/ +float box_iou(Box a, Box b) +{ + return box_intersection(a, b)/box_union(a, b); +} + +/***************************************** +* Function Name : filter_boxes_nms +* Description : Apply Non-Maximum Suppression (NMS) to get rid of overlapped rectangles. 
+* Arguments : det= detected rectangles +* size = number of detections stored in det +* th_nms = threshold for nms +* Return value : - +******************************************/ +void filter_boxes_nms(std::vector &det, int32_t size, float th_nms) +{ + int32_t count = size; + int32_t i = 0; + int32_t j = 0; + Box a; + Box b; + float b_intersection = 0; + for (i = 0; i < count; i++) + { + a = det[i].bbox; + for (j = 0; j < count; j++) + { + if (i == j) + { + continue; + } + if (det[i].c != det[j].c) + { + continue; + } + b = det[j].bbox; + b_intersection = box_intersection(a, b); + if ((box_iou(a, b)>th_nms) || (b_intersection >= a.h * a.w - 1) || (b_intersection >= b.h * b.w - 1)) + { + if (det[i].prob > det[j].prob) + { + det[j].prob= 0; + } + else + { + det[i].prob= 0; + } + } + } + } + return; +} diff --git a/Q08_object_counter/src_v2h/box.h b/Q08_object_counter/src_v2h/box.h new file mode 100755 index 0000000..872ba23 --- /dev/null +++ b/Q08_object_counter/src_v2h/box.h @@ -0,0 +1,73 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. 
TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : box.h +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef BOX_H +#define BOX_H + +#include +#include +#include +#include +#include + +/***************************************** +* Box : Bounding box coordinates and its size +******************************************/ +typedef struct +{ + float x, y, w, h; +} Box; + +/***************************************** +* detection : Detected result +******************************************/ +typedef struct detection +{ + Box bbox; + int32_t c; + float prob; +} detection; + +/***************************************** +* detection : Detected result +******************************************/ +struct bbox_t +{ + std::string name; + int32_t X; + int32_t Y; + int32_t W; + int32_t H; + float pred; +}; +/***************************************** +* Functions 
+******************************************/ +float box_iou(Box a, Box b); +float overlap(float x1, float w1, float x2, float w2); +float box_intersection(Box a, Box b); +float box_union(Box a, Box b); +void filter_boxes_nms(std::vector &det, int32_t size, float th_nms); + +#endif diff --git a/Q08_object_counter/src_v2h/define.h b/Q08_object_counter/src_v2h/define.h new file mode 100755 index 0000000..786e797 --- /dev/null +++ b/Q08_object_counter/src_v2h/define.h @@ -0,0 +1,182 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + *  + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. 
+* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : define.h +* Version : v1.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef DEFINE_MACRO_H +#define DEFINE_MACRO_H + +/***************************************** +* includes +******************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" +#include "wayland.h" + + +static Wayland wayland; +/* Empty since labels will be loaded from label_list file */ +static std::vector label_file_map = {}; + +/***************************************** +* Macro for YOLOv3 +******************************************/ + +/* Number of class to be detected */ +int32_t NUM_CLASS; +/* Number for [region] layer num parameter */ +#define NUM_BB (3) +#define NUM_INF_OUT_LAYER (3) +/* Thresholds */ +#define TH_PROB (0.5f) +#define TH_NMS (0.5f) +// /* Size of input image to the model */ +#define MODEL_IN_W (416) +#define MODEL_IN_H (416) + +/* Number of grids in the image. 
The length of this array MUST match with the NUM_INF_OUT_LAYER */ +const static uint8_t num_grids[] = { 13, 26, 52 }; + +/***************************************** +* Macro for Application +******************************************/ +#define IMAGE_CHANNEL_BGRA (4) +/*Waiting Time*/ +#define WAIT_TIME (1000) /* microseconds */ + +/*Timer Related*/ +#define CAPTURE_TIMEOUT (20) /* seconds */ +#define AI_THREAD_TIMEOUT (20) /* seconds */ +/*DRP-AI memory area offset for model objects*/ +/*Offset value depends on the size of memory area used by DRP-AI Pre-processing Runtime Object files*/ +#define DRPAI_MEM_OFFSET (0) + +/*Maximum DRP-AI Timeout threshold*/ +#define DRPAI_TIMEOUT (5) + +/*Frame threshold to execute inference in every loop + *This value must be determined by DRP-AI processing time and capture processing time. + *For your information YOLOv3 takes around 50 msec and capture takes around 50 msec. */ +#define INF_FRAME_NUM (2) + +/*Camera Capture Image Information*/ +#define CAM_IMAGE_WIDTH (640) +#define CAM_IMAGE_HEIGHT (480) +#define CAM_IMAGE_CHANNEL_YUY2 (2) + +#define CAP_BUF_NUM (6) + +/*DRP-AI Input image information*/ +#define DRPAI_IN_WIDTH (CAM_IMAGE_WIDTH) +#define DRPAI_IN_HEIGHT (CAM_IMAGE_HEIGHT) +#define DRPAI_IN_CHANNEL_YUY2 (CAM_IMAGE_CHANNEL_YUY2) + +/* Display Image Information*/ +#define DISP_OUTPUT_WIDTH (1920) +#define DISP_OUTPUT_HEIGHT (1080) +#define DISP_IMAGE_OUTPUT_WIDTH (1500) +#define DISP_IMAGE_OUTPUT_HEIGHT (1080) + + +/*Waiting Time*/ +#define WAIT_TIME (1000) /* microseconds */ + +/*Timer Related*/ +#define CAPTURE_TIMEOUT (20) /* seconds */ +#define AI_THREAD_TIMEOUT (20) /* seconds */ +#define KEY_THREAD_TIMEOUT (5) /* seconds */ +/* DRP_MAX_FREQ and DRPAI_FREQ are the */ +/* frequency settings for DRP-AI. */ +/*Basicallyuse the default values */ + +#define DRP_MAX_FREQ (2) +/* DRP_MAX_FREQ can be set from 2 to 127 */ +/* 2: 420MHz */ +/* 3: 315MHz */ +/* ... 
*/ +/* 127: 9.84MHz */ +/* Calculation Formula: */ +/* 1260MHz /(DRP_MAX_FREQ + 1) */ + +#define DRPAI_FREQ (2) +/* DRPAI_FREQ can be set from 1 to 127 */ +/* 1,2: 1GHz */ +/* 3: 630MHz */ +/* 4: 420MHz */ +/* 5: 315MHz */ +/* ... */ +/* 127: 10MHz */ +/* Calculation Formula: */ +/* 1260MHz /(DRPAI_FREQ - 1) */ +/* (When DRPAI_FREQ = 3 or more.) */ + + +#endif diff --git a/Q08_object_counter/src_v2h/main.cpp b/Q08_object_counter/src_v2h/main.cpp new file mode 100755 index 0000000..64fa495 --- /dev/null +++ b/Q08_object_counter/src_v2h/main.cpp @@ -0,0 +1,1415 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + *  + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. 
This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : main.cpp +* Version : v1.00 +* Description : RZ/V2H AI SDK Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +/*DRP-AI TVM[*1] Runtime*/ +#include "MeraDrpRuntimeWrapper.h" +/*Definition of Macros & other variables*/ +#include "define.h" +/*box drawing*/ +#include "box.h" +#include "utils.h" + + + +/*Multithreading*/ +static sem_t terminate_req_sem; +static pthread_t ai_inf_thread; +static pthread_t capture_thread; +static pthread_t exit_thread; +static pthread_t kbhit_thread; +static std::mutex mtx; + +/*Flags*/ +static std::atomic inference_start (0); +static std::atomic img_obj_ready (0); + +/*Global Variables*/ +float * drpai_output_buf; +static uint64_t udmabuf_address = 0; + +/*AI Inference for DRPAI*/ +/* DRP-AI TVM[*1] Runtime object */ +MeraDrpRuntimeWrapper runtime; + +static float pre_time = 0; +static float post_time = 0; +static float ai_time = 0; +static float total_time = 0; +static std::vector det; +cv::Mat yuyv_image; +cv::Mat input_image; +std::unordered_map ini_values; +std::vector anchors; +bool doubleClick = false; +static int32_t drp_max_freq; +static int32_t drpai_freq; +using INI_FORMAT = std::unordered_map>; +/***************************************** +* Function Name : float16_to_float32 +* Description : Function by Edgecortex. Cast uint16_t a into float value. 
+* Arguments : a = uint16_t number +* Return value : float = float32 number +******************************************/ +float float16_to_float32(uint16_t a) +{ + return __extendXfYf2__(a); +} + +/***************************************** +* Function Name : timedifference_msec +* Description : compute the time differences in ms between two moments +* Arguments : t0 = start time +* t1 = stop time +* Return value : the time difference in ms +******************************************/ +static double timedifference_msec(struct timespec t0, struct timespec t1) +{ + return (t1.tv_sec - t0.tv_sec) * 1000.0 + (t1.tv_nsec - t0.tv_nsec) / 1000000.0; +} + +/***************************************** +* Function Name : wait_join +* Description : waits for a fixed amount of time for the thread to exit +* Arguments : p_join_thread = thread that the function waits for to Exit +* join_time = the timeout time for the thread for exiting +* Return value : 0 if successful +* not 0 otherwise +******************************************/ +static int8_t wait_join(pthread_t *p_join_thread, uint32_t join_time) +{ + int8_t ret_err; + struct timespec join_timeout; + ret_err = clock_gettime(CLOCK_REALTIME, &join_timeout); + if ( 0 == ret_err ) + { + join_timeout.tv_sec += join_time; + ret_err = pthread_timedjoin_np(*p_join_thread, NULL, &join_timeout); + } + return ret_err; +} + +/***************************************** +* Function Name : config_read +* Description : Read configuration from the config.ini file. 
+******************************************/ +INI_FORMAT config_read(std::string file) +{ + INI_FORMAT ini_values; + /*Open config.ini file*/ + std::ifstream ini_file(file); + std::string line; + std::string current_section; + /*parsing ini file*/ + while (std::getline(ini_file, line)) + { + std::string::size_type comment_pos = line.find(";"); + if (comment_pos != std::string::npos) + { + line.erase(comment_pos); + } + line.erase(0, line.find_first_not_of(" \t\r\n")); + line.erase(line.find_last_not_of(" \t\r\n") + 1); + if (line.empty()) + { + continue; + } + else if (line[0] == '[') + { + current_section = line.substr(1, line.size() - 2); + } + else + { + std::string::size_type delimiter_pos = line.find("="); + std::string key = line.substr(0, delimiter_pos); + std::string value = line.substr(delimiter_pos + 1); + ini_values[current_section][key] = value; + } + } + return ini_values; +} +/***************************************** +* Function Name : load_label_file +* Description : Load label list text file and return the label list that contains the label. +* Arguments : label_file_name = filename of label list. 
must be in txt format +* Return value : vector list = list contains labels +* empty if error occurred +******************************************/ +std::vector load_label_file(std::string label_file_name) +{ + std::vector list = {}; + std::vector empty = {}; + std::ifstream infile(label_file_name); + + if (!infile.is_open()) + { + return list; + } + + std::string line = ""; + while (getline(infile,line)) + { + list.push_back(line); + if (infile.fail()) + { + return empty; + } + } + + return list; +} + +/***************************************** +* Function Name : get_result +* Description : Get DRP-AI Output from memory via DRP-AI Driver +* Arguments : drpai_fd = file descriptor of DRP-AI Driver +* output_addr = memory start address of DRP-AI output +* output_size = output data size +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t get_result() +{ + int8_t ret = 0; + int32_t i = 0; + + int32_t output_num = 0; + std::tuple output_buffer; + int64_t output_size; + uint32_t size_count = 0; + + /* Get the number of output of the target model. */ + output_num = runtime.GetNumOutput(); + size_count = 0; + /*GetOutput loop*/ + for (i = 0;i(output_buffer). 
*/ + output_size = std::get<2>(output_buffer); + + /*Output Data Type = std::get<0>(output_buffer)*/ + if (InOutDataType::FLOAT16 == std::get<0>(output_buffer)) + { + /*Output Data = std::get<1>(output_buffer)*/ + uint16_t* data_ptr = reinterpret_cast(std::get<1>(output_buffer)); + for (int j = 0; j(output_buffer)) + { + /*Output Data = std::get<1>(output_buffer)*/ + float* data_ptr = reinterpret_cast(std::get<1>(output_buffer)); + for (int j = 0; j rgb_images; + cv::split(image, rgb_images); + cv::Mat m_flat_r = rgb_images[0].reshape(1, 1); + cv::Mat m_flat_g = rgb_images[1].reshape(1, 1); + cv::Mat m_flat_b = rgb_images[2].reshape(1, 1); + cv::Mat matArray[] = {m_flat_r, m_flat_g, m_flat_b}; + cv::Mat flat_image; + cv::hconcat(matArray, 3, flat_image); + return flat_image; +} + +/***************************************** +* Function Name : R_Post_Proc +* Description : Process CPU post-processing for YOLOv3 +* Arguments : floatarr = drpai output address +* Return value : - +******************************************/ +void R_Post_Proc(float* floatarr) +{ + /* Following variables are required for correct_region_boxes in Darknet implementation*/ + /* Note: This implementation refers to the "darknet detector test" */ + mtx.lock(); + float new_w, new_h; + float correct_w = 1.; + float correct_h = 1.; + if ((float) (MODEL_IN_W / correct_w) < (float) (MODEL_IN_H/correct_h) ) + { + new_w = (float) MODEL_IN_W; + new_h = correct_h * MODEL_IN_W / correct_w; + } + else + { + new_w = correct_w * MODEL_IN_H / correct_h; + new_h = MODEL_IN_H; + } + + int32_t n = 0; + int32_t b = 0; + int32_t y = 0; + int32_t x = 0; + int32_t offs = 0; + int32_t i = 0; + float tx = 0; + float ty = 0; + float tw = 0; + float th = 0; + float tc = 0; + float center_x = 0; + float center_y = 0; + float box_w = 0; + float box_h = 0; + float objectness = 0; + uint8_t num_grid = 0; + uint8_t anchor_offset = 0; + float classes[NUM_CLASS]; + float max_pred = 0; + int32_t pred_class = -1; + float 
probability = 0; + detection d; + /* Clear the detected result list */ + det.clear(); + + /*Post Processing Start*/ + for (n = 0; n < NUM_INF_OUT_LAYER; n++) + { + num_grid = num_grids[n]; + anchor_offset = 2 * NUM_BB * (NUM_INF_OUT_LAYER - (n + 1)); + + for(b = 0; b < NUM_BB; b++) + { + for(y = 0; y < num_grid; y++) + { + for(x = 0; x < num_grid; x++) + { + offs = yolo_offset(n, b, y, x); + tx = floatarr[offs]; + ty = floatarr[yolo_index(n, offs, 1)]; + tw = floatarr[yolo_index(n, offs, 2)]; + th = floatarr[yolo_index(n, offs, 3)]; + tc = floatarr[yolo_index(n, offs, 4)]; + /* Compute the bounding box */ + /*get_region_box*/ + center_x = ((float) x + sigmoid(tx)) / (float) num_grid; + center_y = ((float) y + sigmoid(ty)) / (float) num_grid; + box_w = (float) exp(tw) * anchors[anchor_offset+2*b+0] / (float) MODEL_IN_W; + box_h = (float) exp(th) * anchors[anchor_offset+2*b+1] / (float) MODEL_IN_W; + /* Adjustment for VGA size */ + /* correct_region_boxes */ + center_x = (center_x - (MODEL_IN_W - new_w) / 2. / MODEL_IN_W) / ((float) new_w / MODEL_IN_W); + center_y = (center_y - (MODEL_IN_H - new_h) / 2. 
/ MODEL_IN_H) / ((float) new_h / MODEL_IN_H); + box_w *= (float) (MODEL_IN_W / new_w); + box_h *= (float) (MODEL_IN_H / new_h); + center_x = round(center_x * DRPAI_IN_WIDTH); + center_y = round(center_y * DRPAI_IN_HEIGHT); + box_w = round(box_w * DRPAI_IN_WIDTH); + box_h = round(box_h * DRPAI_IN_HEIGHT); + objectness = sigmoid(tc); + Box bb = {center_x, center_y, box_w, box_h}; + /* Get the class prediction */ + for (i = 0; i < NUM_CLASS; i++) + { + classes[i] = sigmoid(floatarr[yolo_index(n, offs, 5+i)]); + } + max_pred = 0; + pred_class = -1; + for (i = 0; i < NUM_CLASS; i++) + { + if (classes[i] > max_pred) + { + pred_class = i; + max_pred = classes[i]; + } + } + /* Store the result into the list if the probability is more than the threshold */ + probability = max_pred * objectness; + if (probability > TH_PROB) + { + d = {bb, pred_class, probability}; + det.push_back(d); + } + } + } + } + } + /* Non-Maximum Supression filter */ + filter_boxes_nms(det, det.size(), TH_NMS); + mtx.unlock(); + return ; +} + + + + + +/***************************************** + * Function Name : R_exit_Thread + * Description : Executes the double click exit thread + * Arguments : threadid = thread identification + * Return value : - + ******************************************/ +void *R_exit_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t kh_sem_check = 0; + + /*Variable for checking return value*/ + int8_t ret = 0; + devices dev; + + /*Set Standard Input to Non Blocking*/ + errno = 0; + ret = fcntl(0, F_SETFL, O_NONBLOCK); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to run fctnl(): errno=%d\n", errno); + goto err; + } + + while (1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &kh_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != kh_sem_check) + { + goto exit_end; + } + + dev.detect_mouse_click(); + if (doubleClick) + { + goto err; + } + } + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto exit_end; + +exit_end: + printf("Exit Thread Terminated\n"); + pthread_exit(NULL); +} + + + +/***************************************** +* Function Name : R_Inf_Thread +* Description : Executes the DRP-AI inference thread +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Inf_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t inf_sem_check = 0; + + /*Inference Variables*/ + fd_set rfds; + struct timespec tv; + int8_t inf_status = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + /*Variable for Performance Measurement*/ + static struct timespec start_time; + static struct timespec inf_end_time; + static struct timespec pre_start_time; + static struct timespec pre_end_time; + static struct timespec post_start_time; + static struct timespec post_end_time; + + std::cout << "[INFO] Inference Thread Starting\n"; + + /*Inference Loop Start*/ + while(1) + { + while(1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &inf_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != inf_sem_check) + { + goto ai_inf_end; + } + /*Checks if image frame from Capture Thread is ready.*/ + if (inference_start.load()) + { + break; + } + usleep(WAIT_TIME); + } + + /*Gets Pre-process starting time*/ + ret = timespec_get(&pre_start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Pre-process Start Time\n"); + goto err; + } + + cv::Size size(MODEL_IN_H, MODEL_IN_W); + cv::resize(input_image, input_image, size); + cv::cvtColor(input_image, input_image, cv::COLOR_BGR2RGB); + input_image = hwc2chw(input_image); + input_image.convertTo(input_image, CV_32FC3,1.0 / 255.0, 0); + if (!input_image.isContinuous()) + input_image = input_image.clone(); + + ret = timespec_get(&pre_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Pre-process End Time\n"); + goto err; + } + + /*Set Pre-processing output to be inference input. 
*/ + runtime.SetInput(0, input_image.ptr()); + + /*Pre-process Time Result*/ + pre_time = (float)((timedifference_msec(pre_start_time, pre_end_time))); + + /*Gets inference starting time*/ + ret = timespec_get(&start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Inference Start Time\n"); + goto err; + } + + runtime.Run(); + + /*Gets AI Inference End Time*/ + ret = timespec_get(&inf_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Inference End Time\n"); + goto err; + } + /*Inference Time Result*/ + ai_time = (float)((timedifference_msec(start_time, inf_end_time))); + + /*Gets Post-process starting time*/ + ret = timespec_get(&post_start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Post-process Start Time\n"); + goto err; + } + /*Process to read the DRPAI output data.*/ + ret = get_result(); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get result from memory.\n"); + goto err; + } + + /*CPU Post-Processing For YOLOv3*/ + R_Post_Proc(drpai_output_buf); + /*Gets Post-process End Time*/ + ret = timespec_get(&post_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Post-process End Time\n"); + goto err; + } + /*Post-process Time Result*/ + post_time = (float)((timedifference_msec(post_start_time, post_end_time))); + total_time = pre_time + ai_time + post_time; + inference_start.store(0); + } + /*End of Inference Loop*/ + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto ai_inf_end; +/*AI Thread Termination*/ +ai_inf_end: + /*To terminate the loop in Capture Thread.*/ + printf("AI Inference Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Capture_Thread +* Description : Executes the V4L2 capture with Capture thread. 
+* Arguments : cap_pipeline = gstreamer pipeline +* Return value : - +******************************************/ +void *R_Capture_Thread(void *cap_pipeline) +{ + std::string &gstream = *(static_cast(cap_pipeline)); + std::cout << gstream << std::endl; + /*Semaphore Variable*/ + int32_t capture_sem_check = 0; + int8_t ret = 0; + cv::Mat g_frame; + cv::Mat raw_frame; + cv::VideoCapture g_cap; + + printf("[INFO] Capture Thread Starting\n"); + + g_cap.open(gstream, cv::CAP_GSTREAMER); + if (!g_cap.isOpened()) + { + std::cout << "[ERROR] Error opening video stream or camera !\n" + << std::endl; + goto err; + } + + while(1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &capture_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != capture_sem_check) + { + goto capture_end; + } + + g_cap >> g_frame; + /* Breaking the loop if no video frame is detected */ + if (g_frame.empty()) + { + std::cout << "[INFO] Video ended or corrupted frame !\n"; + goto capture_end; + } + else + { + //cv::resize(g_frame, g_frame, cv::Size(CAM_IMAGE_WIDTH, CAM_IMAGE_HEIGHT)); + if (!inference_start.load()) + { + + input_image = g_frame.clone(); + inference_start.store(1); /* Flag for AI Inference Thread. */ + } + + if (!img_obj_ready.load()) + { + yuyv_image = g_frame.clone(); + img_obj_ready.store(1); /* Flag for Main Thread. 
*/ + } + } + } /*End of Loop*/ + +/*Error Processing*/ +err: + sem_trywait(&terminate_req_sem); + goto capture_end; + +capture_end: + /*To terminate the loop in AI Inference Thread.*/ + inference_start.store(1); + + printf("Capture Thread Terminated\n"); + pthread_exit(NULL); +} + + +/***************************************** + * Function Name : create_output_frame + * Description : create the output frame with space for displaying inference details + * Arguments : cv::Mat frame_g, input frame to be displayed in the background + * Return value : cv::Mat background, final display frame to be written to gstreamer pipeline + *****************************************/ +cv::Mat create_output_frame(cv::Mat frame_g) +{ + /* Create a black background image of size 1080x720 */ + cv::Mat background(DISP_OUTPUT_HEIGHT, DISP_OUTPUT_WIDTH, frame_g.type(), cv::Scalar(0, 0, 0)); + /* Resize the original image to fit within 960x720 */ + cv::Mat resizedImage; + cv::resize(frame_g, resizedImage, cv::Size(DISP_IMAGE_OUTPUT_WIDTH, DISP_IMAGE_OUTPUT_HEIGHT)); + /* Copy the resized image to the left side of the background (0 to 960) */ + cv::Rect roi(cv::Rect(0, 0, resizedImage.cols, resizedImage.rows)); + resizedImage.copyTo(background(roi)); + return background; +} + + +/***************************************** +* Function Name : R_Main_Process +* Description : Runs the main process loop +* Arguments : - +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t R_Main_Process() +{ + /*Main Process Variables*/ + int8_t main_ret = 0; + /*Semaphore Related*/ + int32_t sem_check = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + /*Variable for image buffer id*/ + uint8_t img_buf_id; + /*Variable for detected object time ids*/ + std::map id_time; + std::set unique_ids = {}; + std::map location_history; + long int infer_time_ms; + cv::Mat bgra_image; + + std::set detection_object_set; + std::string objects_available = ""; + 
std::string objects_not_available = ""; + /* detection count */ + std::map detection_count; + std::string config_file_path = ini_values["config_path"]; + INI_FORMAT config_values = config_read(config_file_path); + float conf = std::stof(config_values["detect"]["conf"]); + std::string get_anchor = config_values["detect"]["anchors"]; + std::string detection_object_string = config_values["detect"]["objects"]; + std::stringstream detection_anchor_ss(get_anchor); + std::string anch_value; + while (std::getline(detection_anchor_ss, anch_value, ',')) + { + double conv_anch_value =std::stod(anch_value); + anchors.push_back(conv_anch_value); + } + std::stringstream detection_object_ss(detection_object_string); + std::string item; + while (std::getline(detection_object_ss, item, ',')) + { + if (count(label_file_map.begin(), label_file_map.end(), item) > 0) + { + detection_object_set.insert(item); + objects_available += item + "\n"; + } + else + { + objects_not_available += item + "\n"; + } + } + std::cout << "[INFO] *******************Detection Parameters*******************" << std::endl; + if (!objects_not_available.empty()) + { + std::cout << "[INFO] Selected objects in config.ini which is not found in the label list\n" + << objects_not_available << "\n"; + } + if (!objects_available.empty()) + { + std::cout << "[INFO] Selected objects to detect\n" + << objects_available << "\n"; + } + else + { + std::cerr << "[ERROR] No matching objects in label list from the config.ini file" << std::endl; + exit(0); + } + /*Display font parameter values*/ + float font_size = .85; + float font_weight = 1; + float font_size_dt = 0.65; + float font_size_bb = 0.5; + float font_weight_bb = 1; + + printf("Main Loop Starts\n"); + while(1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != sem_check) + { + goto main_proc_end; + } + /* Check img_obj_ready flag which is set in Capture Thread. */ + if (img_obj_ready.load()) + { + /*key : object, value:count*/ + detection_count.clear(); + bgra_image = yuyv_image; + infer_time_ms = total_time; + int total_count = 0; + int items = 0; + + mtx.lock(); + /*filter detection based on confidence score and objects selected*/ + for (detection detect : det) + { + bbox_t dat; + + /*ignore detection based on the threshold from the config.ini file*/ + if (detect.prob < conf) + { + continue; + } + /*get the label from label file map*/ + dat.name = label_file_map[detect.c].c_str(); + + /*check if the detected object is in the list of objects to be detected(from the config.ini file)*/ + if (count(detection_object_set.begin(), detection_object_set.end(), dat.name) <= 0) + { + continue; + } + + /*map for storing the count of detected objects*/ + if (detection_count.count(dat.name) > 0) + { + detection_count[dat.name]++; + } + else + { + detection_count[dat.name] = 1; + } + + dat.X = (int32_t)(detect.bbox.x - (detect.bbox.w / 2)); + dat.Y = (int32_t)(detect.bbox.y - (detect.bbox.h / 2)); + dat.W = (int32_t)detect.bbox.w; + dat.H = (int32_t)detect.bbox.h; + dat.pred = detect.prob * 100.0; + + cv::Size text_size = cv::getTextSize(dat.name, cv::FONT_HERSHEY_SIMPLEX, font_size_bb, 2, 0); + /*adjust the font size based on the detection text size*/ + if (text_size.width > dat.W) + { + font_weight_bb = .75; + font_size_bb = 0.3; + } + else + { + font_size_dt = 0.65; + font_size_bb = 0.55; + } + + cv::Rect rect(dat.X, dat.Y, dat.W, dat.H); + cv::Rect rect_text_box(dat.X, dat.Y - 20, dat.W, 20); + /*draw the rectangle for detected object*/ + 
cv::rectangle(bgra_image, rect, cv::Scalar(0, 255, 0), 1.5); + /*draw text box for holding the class label*/ + cv::rectangle(bgra_image, rect_text_box, cv::Scalar(0, 255, 0), cv::FILLED); + /*writing class label to the display frame */ + cv::putText(bgra_image, dat.name, cv::Point(dat.X + 5, dat.Y - 8), + cv::FONT_HERSHEY_SIMPLEX, font_size_bb, cv::Scalar(0, 0, 0), font_weight_bb); + } + mtx.unlock(); + bgra_image = create_output_frame(bgra_image); + cv::putText(bgra_image, "Preprocess Time: " + std::to_string(int(pre_time)), cv::Point(1520, 60), + cv::FONT_HERSHEY_DUPLEX, font_size, cv::Scalar(255, 255, 255), font_weight); + cv::putText(bgra_image, "AI Inference Time: " + std::to_string(int(ai_time)), cv::Point(1520, 90), + cv::FONT_HERSHEY_DUPLEX, font_size, cv::Scalar(255, 255, 255), font_weight); + cv::putText(bgra_image, "Postprocess Time: " + std::to_string(int(post_time)), cv::Point(1520, 120), + cv::FONT_HERSHEY_DUPLEX, font_size, cv::Scalar(255, 255, 255), font_weight); + // cv::putText(bgra_image, "Double Click to exit the Application!!", cv::Point(1540, 1020), + // cv::FONT_HERSHEY_SIMPLEX, 0.60, cv::Scalar(255, 255, 255), font_weight, cv::LINE_AA); + for (std::map::iterator it = detection_count.begin(); it != detection_count.end(); ++it) + { + cv::putText(bgra_image, std::string(it->first) + ": " + std::to_string(it->second), + cv::Point(1530, 180 + 30*items), cv::FONT_HERSHEY_DUPLEX, font_size, + cv::Scalar(255, 255, 255), font_weight); + total_count += (int)it->second; + items++; + } + cv::putText(bgra_image, "Total Objects: " + std::to_string(total_count), cv::Point(1520, 150), + cv::FONT_HERSHEY_DUPLEX, font_size, cv::Scalar(255, 255, 255), font_weight); + cv::cvtColor(bgra_image, bgra_image, cv::COLOR_BGR2BGRA); + wayland.commit(bgra_image.data, NULL); + img_obj_ready.store(0); + } + + /*Wait for 1 TICK.*/ + usleep(WAIT_TIME); + } + +/*Error Processing*/ +err: + sem_trywait(&terminate_req_sem); + main_ret = 1; + goto main_proc_end; +/*Main 
Processing Termination*/ +main_proc_end: + /*To terminate the loop in Capture Thread.*/ + img_obj_ready.store(0); + printf("Main Process Terminated\n"); + return main_ret; +} + + +/***************************************** +* Function Name : get_drpai_start_addr +* Description : Get DRP-AI Memory Area Address via DRP-AI Driver +* Arguments : - +* Return value : drpai start address +******************************************/ +uint64_t get_drpai_start_addr(int drpai_fd) +{ + int fd = 0; + int ret = 0; + drpai_data_t drpai_data; + + errno = 0; + + fd = open("/dev/drpai0", O_RDWR); + if (0 > fd ) + { + LOG(FATAL) << "[ERROR] Failed to open DRP-AI Driver : errno=" << errno; + return (uint32_t)NULL; + } + + /* Get DRP-AI Memory Area Address via DRP-AI Driver */ + ret = ioctl(fd , DRPAI_GET_DRPAI_AREA, &drpai_data); + if (-1 == ret) + { + LOG(FATAL) << "[ERROR] Failed to get DRP-AI Memory Area : errno=" << errno ; + return (uint32_t)NULL; + } + + return drpai_data.address; +} + + +/***************************************** + * Function Name : query_device_status + * Description : function to check USB device is connectod. + * Return value : media_port, media port that device is connectod. + ******************************************/ +std::string query_device_status(std::string device_type) +{ + std::string media_port = ""; + /* Linux command to be executed */ + const char* command = "v4l2-ctl --list-devices"; + /* Open a pipe to the command and execute it */ + FILE* pipe = popen(command, "r"); + if (!pipe) + { + std::cerr << "[ERROR] Unable to open the pipe." 
<< std::endl; + return media_port; + } + /* Read the command output line by line */ + char buffer[128]; + size_t found; + while (fgets(buffer, sizeof(buffer), pipe) != nullptr) + { + std::string response = std::string(buffer); + found = response.find(device_type); + if (found != std::string::npos) + { + fgets(buffer, sizeof(buffer), pipe); + media_port = std::string(buffer); + pclose(pipe); + /* return media port*/ + return media_port; + } + } + pclose(pipe); + /* return media port*/ + return media_port; +} + +/***************************************** +* Function Name : set_drpai_freq +* Description : Function to set the DRP and DRP-AI frequency. +* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int set_drpai_freq(int drpai_fd) +{ + int ret = 0; + uint32_t data; + + errno = 0; + data = drp_max_freq; + ret = ioctl(drpai_fd , DRPAI_SET_DRP_MAX_FREQ, &data); + if (-1 == ret) + { + std::cerr << "[ERROR] Failed to set DRP Max Frequency : errno=" << errno << std::endl; + return -1; + } + + errno = 0; + data = drpai_freq; + ret = ioctl(drpai_fd , DRPAI_SET_DRPAI_FREQ, &data); + if (-1 == ret) + { + std::cerr << "[ERROR] Failed to set DRP-AI Frequency : errno=" << errno << std::endl; + return -1; + } + + return 0; +} + +/***************************************** +* Function Name : init_drpai +* Description : Function to initialize DRP-AI. +* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : If non-zero, DRP-AI memory start address. +* 0 is failure. 
+******************************************/ +uint64_t init_drpai(int drpai_fd) +{ + int ret = 0; + uint64_t drpai_addr = 0; + + /*Get DRP-AI memory start address*/ + drpai_addr = get_drpai_start_addr(drpai_fd); + if (drpai_addr == 0) + { + return 0; + } + + /*Set DRP-AI frequency*/ + ret = set_drpai_freq(drpai_fd); + if (ret != 0) + { + return 0; + } + + return drpai_addr; +} +/***************************************** + * Function Name : R_Kbhit_Thread + * Description : Executes the Keyboard hit thread (checks if enter key is hit) + * Arguments : threadid = thread identification + * Return value : - + ******************************************/ +void *R_Kbhit_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t kh_sem_check = 0; + /*Variable to store the getchar() value*/ + int32_t c = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + + printf("[INFO] Key Hit Thread Starting\n"); + + printf("************************************************\n"); + printf("* Press ENTER key to quit. *\n"); + printf("************************************************\n"); + + /*Set Standard Input to Non Blocking*/ + errno = 0; + ret = fcntl(0, F_SETFL, O_NONBLOCK); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to run fctnl(): errno=%d\n", errno); + goto err; + } + + while (1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &kh_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != kh_sem_check) + { + goto key_hit_end; + } + + c = getchar(); + if (EOF != c) + { + /* When key is pressed. */ + printf("key Detected.\n"); + goto err; + } + else + { + /* When nothing is pressed. 
*/ + usleep(WAIT_TIME); + } + } + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto key_hit_end; + +key_hit_end: + printf("Key Hit Thread Terminated\n"); + pthread_exit(NULL); +} + +int32_t main(int32_t argc, char * argv[]) +{ + int8_t main_proc = 0; + int8_t ret = 0; + int8_t ret_w = 0; + int8_t ret_main = 0; + /*Multithreading Variables*/ + int32_t create_thread_ai = -1; + int32_t create_thread_capture = -1; + int32_t create_thread_exit = -1; + int32_t create_thread_key = -1; + int32_t sem_create = -1; + InOutDataType input_data_type; + bool runtime_status = false; + std::string gstreamer_pipeline; + + /*Disable OpenCV Accelerator due to the use of multithreading */ + unsigned long OCA_list[16]; + for (int i=0; i < 16; i++) OCA_list[i] = 0; + OCA_Activate( &OCA_list[0] ); + + if (argc<3) + { + std::cout << "[ERROR] Please specify Input Option/Source\n"; + std::cout << "[INFO] usage: ./object_counter COCO|animal|vehicle USB\n"; + std::cout << "[INFO] End Application.\n"; + return -1; + + } + + std::string mode = argv[1]; + std::string input_source = argv[2]; + + /* DRP-AI Frequency Setting */ + if (4 <= argc) + drp_max_freq = atoi(argv[3]); + else + drp_max_freq = DRP_MAX_FREQ; + if (5 <= argc) + drpai_freq = atoi(argv[4]); + else + drpai_freq = DRPAI_FREQ; + std::cout<<"\n[INFO] DRP MAX FREQUENCY : "< drpai_fd) + { + std::cerr << "[ERROR] Failed to open DRP-AI Driver : errno=" << errno << std::endl; + return -1; + } + + uint64_t drpaimem_addr_start = 0; + // uint64_t drpaimem_addr_start = 1073741824; + + /*Load Label from label_list file*/ + label_file_map = load_label_file(ini_values["label_path"]); + if (label_file_map.empty()) + { + fprintf(stderr,"[ERROR] Failed to load label file: %s\n", ini_values["label_path"].c_str()); + ret = -1; + goto end_main; + } + uint32_t INF_OUT_SIZE; + + /*Load number of class from label_list file*/ + NUM_CLASS = label_file_map.size(); + INF_OUT_SIZE = (NUM_CLASS + 
5) * NUM_BB * num_grids[0] * num_grids[0] + + (NUM_CLASS + 5) * NUM_BB * num_grids[1] * num_grids[1] + + (NUM_CLASS + 5) * NUM_BB * num_grids[2] * num_grids[2]; + + drpai_output_buf =new float[INF_OUT_SIZE]; + + /*Load model_dir structure and its weight to runtime object */ + drpaimem_addr_start = init_drpai(drpai_fd); + + if ((uint32_t)NULL == drpaimem_addr_start) + { + fprintf(stderr, "[ERROR] Failed to get DRP-AI memory area start address.\n"); + goto end_main; + } + + runtime_status = runtime.LoadModel(ini_values["model_path"], drpaimem_addr_start+DRPAI_MEM_OFFSET); + if(!runtime_status) + { + fprintf(stderr, "[ERROR] Failed to load model.\n"); + goto end_main; + } + + /*Get input data */ + input_data_type = runtime.GetInputDataType(0); + if (InOutDataType::FLOAT32 == input_data_type) + { + /*Do nothing*/ + } + else if (InOutDataType::FLOAT16 == input_data_type) + { + fprintf(stderr, "[ERROR] Input data type : FP16.\n"); + /*If your model input data type is FP16, use std::vector for reading input data. 
*/ + goto end_main; + } + else + { + fprintf(stderr, "[ERROR] Input data type : neither FP32 nor FP16.\n"); + goto end_main; + } + + /*Termination Request Semaphore Initialization*/ + /*Initialized value at 1.*/ + sem_create = sem_init(&terminate_req_sem, 0, 1); + if (0 != sem_create) + { + fprintf(stderr, "[ERROR] Failed to Initialize Termination Request Semaphore.\n"); + ret_main = -1; + goto end_threads; + } + /*Create exit Thread*/ + create_thread_exit = pthread_create(&exit_thread, NULL, R_exit_Thread, NULL); + if (0 != create_thread_exit) + { + fprintf(stderr, "[ERROR] Failed to create exit Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /*Create Key Hit Thread*/ + create_thread_key = pthread_create(&kbhit_thread, NULL, R_Kbhit_Thread, NULL); + if (0 != create_thread_key) + { + fprintf(stderr, "[ERROR] Failed to create Key Hit Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /*Create Inference Thread*/ + create_thread_ai = pthread_create(&ai_inf_thread, NULL, R_Inf_Thread, NULL); + if (0 != create_thread_ai) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create AI Inference Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /*Create Capture Thread*/ + create_thread_capture = pthread_create(&capture_thread, NULL, R_Capture_Thread, (void *) &gstreamer_pipeline); + if (0 != create_thread_capture) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create Capture Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /*Main Processing*/ + main_proc = R_Main_Process(); + if (0 != main_proc) + { + fprintf(stderr, "[ERROR] Error during Main Process\n"); + ret_main = -1; + } + goto end_threads; + +end_threads: + if(0 == create_thread_capture) + { + ret = wait_join(&capture_thread, CAPTURE_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Capture Thread on time.\n"); + ret_main = -1; + } + } + if (0 == create_thread_ai) + { + ret = wait_join(&ai_inf_thread, 
AI_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit AI Inference Thread on time.\n"); + ret_main = -1; + } + } + + + /*Delete Terminate Request Semaphore.*/ + if (0 == sem_create) + { + sem_destroy(&terminate_req_sem); + } + goto end_main; + +end_main: + printf("Application End\n"); + return ret_main; +} \ No newline at end of file diff --git a/Q08_object_counter/src_v2h/toolchain/runtime.cmake b/Q08_object_counter/src_v2h/toolchain/runtime.cmake new file mode 100755 index 0000000..0ad3cfc --- /dev/null +++ b/Q08_object_counter/src_v2h/toolchain/runtime.cmake @@ -0,0 +1,14 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR aarch64) +set(MERA_DRP_RUNTIME ON) +set(DCMAKE_SYSTEM_VERSION 1) + +set(CMAKE_SYSROOT $ENV{SDK}/sysroots/aarch64-poky-linux) +set(CMAKE_FIND_ROOT_PATH $ENV{SDK}/sysroots/aarch64-poky-linux/usr/include/gnu) +set(CMAKE_CXX_COMPILER $ENV{SDK}/sysroots/x86_64-pokysdk-linux/usr/bin/aarch64-poky-linux/aarch64-poky-linux-g++) +set(CMAKE_C_COMPILER $ENV{SDK}/sysroots/x86_64-pokysdk-linux/usr/bin/aarch64-poky-linux/aarch64-poky-linux-gcc) + +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) diff --git a/Q08_object_counter/src_v2h/utils.cpp b/Q08_object_counter/src_v2h/utils.cpp new file mode 100755 index 0000000..e414593 --- /dev/null +++ b/Q08_object_counter/src_v2h/utils.cpp @@ -0,0 +1,125 @@ +#include "utils.h" + +extern bool doubleClick; + +int devices::detect_mouse_click(void) +{ + /* Gets the full path to the mouse event device. */ + std::string full_path = find_mouse_event(); + /* Converts the full path to a const char pointer. */ + const char* device_path = full_path.c_str(); + /* Opens the mouse event device file. 
*/ + int fd = open(device_path, O_RDONLY); + if (fd < 0) + { + std::cerr << "Error opening input device" << std::endl; + return 1; + } + /* Enters a loop to read mouse events. */ + while (true) + { + struct input_event ev; + /* Reads a mouse event from the device file. */ + ssize_t bytesRead = read(fd, &ev, sizeof(struct input_event)); + /* Checks if the event is a key event and if the key code is for the left or right mouse button. */ + if (bytesRead == sizeof(struct input_event) && ev.type == EV_KEY && + (ev.code == BTN_LEFT || ev.code == BTN_RIGHT)) + { + /* Checks if the key is being pressed (value == 1) or released (value == 0). */ + if (ev.value == 0) + { + if(first_click == true) + { + t2 = std::chrono::high_resolution_clock::now(); + duration = std::chrono::duration_cast(t2 - t1).count(); + first_click = false; + } + if(second_click == true) + { + t4 = std::chrono::high_resolution_clock::now(); + duration_sc = std::chrono::duration_cast(t4 - t3).count(); + second_click = false; + } + /* If the second click has occurred and the duration between clicks is less than 110 milliseconds, + then a double click has been detected. */ + if(duration_cd < tm_clk_difference && duration_cd != 0 && duration < tm_difference + && duration != 0 && duration_sc < tm_difference && duration_sc != 0) + { + std::cout<<"[INFO] Double click detected\n"; + duration = 0; + duration_cd = 0; + duration_sc = 0; + doubleClick = true; + break; + } + } + else if (ev.value == 1) + { + t1 = std::chrono::high_resolution_clock::now(); + if(first_click == false && duration < tm_difference && duration != 0) + { + t3 = std::chrono::high_resolution_clock::now(); + duration = 0; + second_click = true; + } + if(second_click == true) + duration_cd = std::chrono::duration_cast(t3 - t2).count(); + first_click = true; + } + } + } + /* Closes the mouse event device file. */ + close(fd); + /* Returns 0 if no double click was detected. 
*/ + return 0; +} + +std::string devices::find_mouse_event(void) +{ + /* Path to the directory containing all input devices. */ + const char* inputDirPath = "/dev/input/"; + /* Opens the input directory.*/ + DIR* dir = opendir(inputDirPath); + if (!dir) + { + std::cerr << "Error opening input directory" << std::endl; + exit(1); + } + /* Iterates over all entries in the input directory. */ + dirent* entry; + while ((entry = readdir(dir)) != nullptr) + { + /* Skips entries that start with a dot (i.e., hidden files). */ + if (entry->d_name[0] == '.') + { + continue; + } + /* Constructs the full path to the input device. */ + std::string fullPath = inputDirPath + std::string(entry->d_name); + int fd = open(fullPath.c_str(), O_RDONLY | O_NONBLOCK); + if (fd < 0) + { + continue; + } + /* Checks if the input device has relative axis (e.g., a mouse). */ + if (ioctl(fd, EVIOCGBIT(0, EV_MAX), sizeof(long)*EV_MAX)) + { + unsigned long evBits[EV_MAX]; + ioctl(fd, EVIOCGBIT(0, EV_MAX), evBits); + /* Returns the path to the device if the EV_REL bit is set (i.e., the device has relative axis). */ + if (evBits[EV_REL / BITS_PER_LONG] & (1 << (EV_REL % BITS_PER_LONG))) + { + close(fd); + closedir(dir); + return fullPath; + } + } + /* Closes the input device file. */ + close(fd); + } + /* Closes the input directory. */ + closedir(dir); + /* Prints an error message and exits the program if no mouse device is found. */ + std::cerr << "No mouse device found" << std::endl; + exit(1); +} diff --git a/Q08_object_counter/src_v2h/utils.h b/Q08_object_counter/src_v2h/utils.h new file mode 100755 index 0000000..787683b --- /dev/null +++ b/Q08_object_counter/src_v2h/utils.h @@ -0,0 +1,55 @@ +/* Prevents the header file from being included multiple times. */ +#ifndef UTILS +#define UTILS + +/* Includes necessary header files. */ +#include +#include +#include +#include +#include +#include +#include + +/* Defines constants that are used in the code. 
*/ +#ifndef BITS_PER_LONG +#define BITS_PER_LONG (sizeof(long) * 8) +#endif + +#ifndef EV_REL +#define EV_REL 0x02 +#endif + +#ifndef EV_MAX +#define EV_MAX 0x1F +#endif + +/* Declares a class called devices. */ +class devices +{ + private: + /* Initializes variables to track the duration between mouse clicks. */ + std::chrono::high_resolution_clock::time_point t1; + std::chrono::high_resolution_clock::time_point t2; + std::chrono::high_resolution_clock::time_point t3; + std::chrono::high_resolution_clock::time_point t4; + + int duration = 0; + int duration_sc = 0; + int duration_cd = 0; + + const int tm_difference = 110; + const int tm_clk_difference = 190; + + bool last_click = false; + bool first_click = false; + bool second_click = false; + + /* Finds the path to the mouse event device. */ + std::string find_mouse_event(void); + public: + /* Detects mouse clicks. */ + int detect_mouse_click(void); +}; + +#endif \ No newline at end of file diff --git a/Q08_object_counter/src_v2h/wayland.cpp b/Q08_object_counter/src_v2h/wayland.cpp new file mode 100755 index 0000000..b34f20b --- /dev/null +++ b/Q08_object_counter/src_v2h/wayland.cpp @@ -0,0 +1,494 @@ +/*********************************************************************************************************************** + * Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+ ***********************************************************************************************************************/ +/*********************************************************************************************************************** + * File Name : wayland.cpp + * Version : 0.90 + * Description : RZ/V2H DRP-AI Sample Application for Megvii-Base Detection YOLOX with MIPI/USB Camera + ***********************************************************************************************************************/ + +/***************************************** + * Includes + ******************************************/ +// #include "define.h" +#include "wayland.h" +#include +#include +#include +#include +#include +#include +#include +#include + + +struct WaylandGlobals { + struct wl_compositor* compositor; + struct wl_shell* shell; +}; + +/***************************************** + * Function Name : registry_global + * Description : wl_registry_listener callback + * wayland func bind. + * Arguments : data = The third argument of wl_registry_add_listener() is notified. + * regisry = The first argument of wl_registry_add_listener() is notified. + * name = global object ID is notified. + * interface = interface name is notifed. + * version = interface version is notified. 
+ * Return value : - + ******************************************/ +static void registry_global(void *data, + struct wl_registry *registry, uint32_t id, + const char *interface, uint32_t version) +{ + struct WaylandGlobals* globals = (struct WaylandGlobals*)data; + if (strcmp(interface, "wl_compositor") == 0) { + globals->compositor = (struct wl_compositor*)wl_registry_bind(registry, id, &wl_compositor_interface, 1); + } + else if (strcmp(interface, "wl_shell") == 0) { + globals->shell = (struct wl_shell*)wl_registry_bind(registry, id, &wl_shell_interface, 1); + } +} + +/* registry callback for listener */ +static const struct wl_registry_listener registry_listener = { registry_global, NULL }; + +/***************************************** + * Function Name : shell_surface_ping + * Description : wl_shell_surface_listener callback + * compositer check hungup + * Arguments : data = The third argument of wl_shell_surface_add_listener() is notified. + * shell_surface = The first argument of wl_shell_surface_add_listener() is notified. + * serial = Identification ID is notified. 
+ * Return value : - + ******************************************/ +static void shell_surface_ping(void *data, + struct wl_shell_surface *shell_surface, + uint32_t serial) +{ + wl_shell_surface_pong(shell_surface, serial); +} + +static const struct wl_shell_surface_listener shell_surface_listener = +{ + .ping = shell_surface_ping, +}; + +Wayland::Wayland() +{ +} + +Wayland::~Wayland() +{ +} + +/***************************************** + * Function Name : LoadShader + * Description : Return the loaded and compiled shader + * Arguments : type + * shaderSrc + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +GLuint Wayland::LoadShader(GLenum type, const char* shaderSrc) +{ + GLuint shader = glCreateShader(type); + assert(shader); + + glShaderSource(shader, 1, &shaderSrc, NULL); + glCompileShader(shader); + + GLint compiled; + glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled); + assert(compiled); + + return shader; +} + +/***************************************** + * Function Name : initProgramObject + * Description : Initialize the shaders and return the program object + * Arguments : pShader + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +GLuint Wayland::initProgramObject(SShader* pShader) +{ + const char* vshader = R"( + attribute vec4 position; + attribute vec2 texcoord; + varying vec2 texcoordVarying; + void main() { + gl_Position = position; + texcoordVarying = texcoord; + } + )"; + + const char* fshader = R"( + precision mediump float; + uniform sampler2D texture; + varying vec2 texcoordVarying; + void main() { + highp float r = texture2D(texture, texcoordVarying).b; + highp float g = texture2D(texture, texcoordVarying).g; + highp float b = texture2D(texture, texcoordVarying).r; + highp float a = texture2D(texture, texcoordVarying).a; + gl_FragColor = vec4(r,g,b,a); + } + + )"; + + GLuint vertexShader = LoadShader(GL_VERTEX_SHADER, vshader); + GLuint 
fragmentShader = LoadShader(GL_FRAGMENT_SHADER, fshader); + + GLuint programObject = glCreateProgram(); + assert(programObject); + + glAttachShader(programObject, vertexShader); + glAttachShader(programObject, fragmentShader); + + glLinkProgram(programObject); + + GLint linked; + glGetProgramiv(programObject, GL_LINK_STATUS, &linked); + assert(linked); + + glDeleteShader(fragmentShader); + glDeleteShader(vertexShader); + + pShader->unProgram = programObject; + pShader->nAttrPos = glGetAttribLocation(pShader->unProgram, "position"); + pShader->nAttrColor = glGetAttribLocation(pShader->unProgram, "texcoord"); + return programObject; +} + +/***************************************** + * Function Name : initEGLDisplay + * Description : Configure EGL and return necessary resources + * Arguments : nativeDisplay + * nativeWindow + * eglDisplay + * eglSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initEGLDisplay(EGLNativeDisplayType nativeDisplay, EGLNativeWindowType nativeWindow, EGLDisplay* eglDisplay, EGLSurface* eglSurface) +{ +// int8_t ret = 0; + + EGLint number_of_config; + EGLint config_attribs[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, 8, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, + EGL_NONE + }; + + static const EGLint context_attribs[] = { + EGL_CONTEXT_CLIENT_VERSION, 2, + EGL_NONE + }; + + *eglDisplay = eglGetDisplay(nativeDisplay); + if (*eglDisplay == EGL_NO_DISPLAY) + { + return -1; + } + + EGLBoolean initialized = eglInitialize(*eglDisplay, NULL, NULL); + if (initialized != EGL_TRUE) + { + return -1; + } + + EGLConfig configs[1]; + + EGLBoolean config = eglChooseConfig(*eglDisplay, config_attribs, configs, 1, &number_of_config); + if (config != EGL_TRUE) + { + return -1; + } + + EGLContext eglContext = eglCreateContext(*eglDisplay, configs[0], EGL_NO_CONTEXT, context_attribs); + + *eglSurface = 
eglCreateWindowSurface(*eglDisplay, configs[0], nativeWindow, NULL); + if (*eglSurface == EGL_NO_SURFACE) + { + return -1; + } + + EGLBoolean makeCurrent = eglMakeCurrent(*eglDisplay, *eglSurface, *eglSurface, eglContext); + if (makeCurrent != EGL_TRUE) + { + return -1; + } + return 0; +} + + +/***************************************** + * Function Name : initWaylandDisplay + * Description : Connect to the Wayland display and return the display and the surface + * Arguments : wlDisplay + * wlSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initWaylandDisplay(struct wl_display** wlDisplay, struct wl_surface** wlSurface) +{ +// int8_t ret = 0; + struct WaylandGlobals globals = { 0 }; + + *wlDisplay = wl_display_connect(NULL); + if(*wlDisplay == NULL) + { + return -1; + } + + struct wl_registry* registry = wl_display_get_registry(*wlDisplay); + wl_registry_add_listener(registry, ®istry_listener, (void*)&globals); + + wl_display_dispatch(*wlDisplay); + wl_display_roundtrip(*wlDisplay); + if (globals.compositor == NULL || globals.shell == NULL) + { + return -1; + } + + *wlSurface = wl_compositor_create_surface(globals.compositor); + if (*wlSurface == NULL) + { + return -1; + } + + struct wl_shell_surface* shellSurface = wl_shell_get_shell_surface(globals.shell, *wlSurface); + wl_shell_surface_set_toplevel(shellSurface); + return 0; +} + +/***************************************** + * Function Name : initWindow + * Description : Connect Wayland and make EGL + * Arguments : width + * height + * wlDisplay + * eglDisplay + * eglSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initWindow(GLint width, GLint height, struct wl_display** wlDisplay, EGLDisplay* eglDisplay, EGLSurface* eglSurface) +{ + int8_t ret = 0; + struct wl_surface* wlSurface; + ret = initWaylandDisplay(wlDisplay, &wlSurface); + if (ret != 0) + { + return -1; + } + + 
struct wl_egl_window* wlEglWindow = wl_egl_window_create(wlSurface, width, height); + if (wlEglWindow == NULL) + { + return -1; + } + + ret = initEGLDisplay((EGLNativeDisplayType)*wlDisplay, (EGLNativeWindowType)wlEglWindow, eglDisplay, eglSurface); + if (ret != 0) + { + return -1; + } + return 0; +} + +/***************************************** + * Function Name : init + * Description : wayland client init + * create buffer. + * Arguments : idx = index of the display buffer + * w = width + * c = color + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::init(uint32_t idx, uint32_t w, uint32_t h, uint32_t c, bool overlay) +{ + int8_t ret = 0; + img_w = w; + img_h = h; + img_c = c; + img_overlay = overlay; + + // Connect Wayland and make EGL + ret = initWindow(w, h, &display, &eglDisplay, &eglSurface); + if (ret != 0) + { + return -1; + } + + //Initialize the shaders and return the program object + GLuint programObject = initProgramObject(&sShader); + if (programObject == 0) + { + return -1; + } + + // Apply program object + glUseProgram(sShader.unProgram); + glGenTextures(2, textures); + + glEnableVertexAttribArray(sShader.nAttrPos); + glEnableVertexAttribArray(sShader.nAttrColor); + + // enable Alpha Blending + if (img_overlay == true){ + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + } + + glUniform1i(glGetUniformLocation(sShader.unProgram, "texture"), 0); + + return 0; +} + +/***************************************** + * Function Name : exit + * Description : Exit Wayland + * Arguments : - + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::exit() +{ + SShader* pShader = &sShader; + if (pShader) { + glDeleteProgram(pShader->unProgram); + pShader->unProgram = 0; + pShader->nAttrPos = -1; + pShader->nAttrColor = -1; + } + wl_display_disconnect(display); + return 0; +} + + 
+/***************************************** + * Function Name : render + * Description : + * Arguments : pShader + * texID + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::render(SShader* pShader, GLuint texID) +{ + const float vertices[] = { + -1.0f, 1.0f, 0.0f, + -1.0f, -1.0f, 0.0f, + 1.0f, 1.0f, 0.0f, + 1.0f, -1.0f, 0.0f + }; + + const float texcoords[] = { + 0.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 1.0f, 1.0f }; + + + glVertexAttribPointer(pShader->nAttrColor, 2, GL_FLOAT, GL_FALSE, 0, texcoords); + glVertexAttribPointer(pShader->nAttrPos, 3, GL_FLOAT, GL_FALSE, 0, vertices); + + // draw texture + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, texID); + //glUniform1i(uniID, texID); + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + return 0; +} + + +/***************************************** + * Function Name : setupTexture + * Description : Bind Texture + * Arguments : texID + * src_pixels + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::setupTexture(GLuint texID, uint8_t* src_pixels) +{ + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + glBindTexture(GL_TEXTURE_2D, texID); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img_w, img_h, 0, GL_RGBA, GL_UNSIGNED_BYTE, src_pixels); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + return 0; +} + + +/***************************************** + * Function Name : commit + * Description : Commit to update the display image + * Arguments : buf_id = buffer id + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::commit(uint8_t* cam_buffer, uint8_t* ol_buffer) +{ + uint8_t ret = 0; +#ifdef DEBUG_TIME_FLG + 
using namespace std; + chrono::system_clock::time_point start, end; + double time = 0; + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + // setup texture + setupTexture(textures[0], cam_buffer); + if (ol_buffer != NULL && img_overlay == true) { + setupTexture(textures[1], ol_buffer); + } +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Setup Image Time : %lf[ms]\n", time); +#endif // DEBUG_TIME_FLG + + // clear + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + +#ifdef DEBUG_TIME_FLG + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + // render + render(&sShader, textures[0]); + if (ol_buffer != NULL && img_overlay == true) { + render(&sShader, textures[1]); + } +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Specifies Render Time : %lf[ms]\n", time); + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + eglSwapBuffers(eglDisplay, eglSurface); + +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Update Frame Time : %lf[ms]\n", time); +#endif // DEBUG_TIME_FLG + + return ret; +} + diff --git a/Q08_object_counter/src_v2h/wayland.h b/Q08_object_counter/src_v2h/wayland.h new file mode 100755 index 0000000..ec3ef13 --- /dev/null +++ b/Q08_object_counter/src_v2h/wayland.h @@ -0,0 +1,62 @@ +/*********************************************************************************************************************** +* Copyright (C) 2023 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : wayland.h +* Version : 0.90 +* Description : RZ/V2H DRP-AI Sample Application for Megvii-Base Detection YOLOX with MIPI/USB Camera +***********************************************************************************************************************/ + +#ifndef WAYLAND_H +#define WAYLAND_H + + +// #include "define.h" +#include +#include +#include +#include + +class Wayland +{ + /* structure of Shader settings */ + typedef struct _SShader { + GLuint unProgram; + GLint nAttrPos; + GLint nAttrColor; + } SShader; + + public: + Wayland(); + ~Wayland(); + + uint8_t init(uint32_t idx, uint32_t w, uint32_t h, uint32_t c, bool overlay = false); + uint8_t exit(); + uint8_t commit(uint8_t* cam_buffer, uint8_t* ol_buffer); + + struct wl_compositor *compositor = NULL; + struct wl_shm *shm = NULL; + struct wl_shell *shell = NULL; + + private: + uint32_t img_h; + uint32_t img_w; + uint32_t img_c; + bool img_overlay; + + struct wl_display *display = NULL; + struct wl_surface *surface; + struct wl_shell_surface *shell_surface; + struct wl_registry *registry = NULL; + EGLDisplay eglDisplay; + EGLSurface eglSurface; + SShader sShader; + GLuint textures[2]; + + GLuint LoadShader(GLenum type, const char* shaderSrc); + GLuint initProgramObject(SShader* pShader); + uint8_t render(SShader* pShader, GLuint texID); + uint8_t setupTexture(GLuint texID, uint8_t* src_pixels); +}; + +#endif diff --git a/R01_object_detection/README.md b/R01_object_detection/README.md index a8d503a..52e52db 100755 --- a/R01_object_detection/README.md +++ b/R01_object_detection/README.md @@ -1,61 +1,182 @@ # Object Detection ## Application: Overview -This application is designed to detect objects in the capture image of MIPI Camera Module and displays the 
result on HDMI screen. +This application is designed to detect objects in the capture image of Camera and displays the result on HDMI screen. This software could be useful in a variety of settings, such as retail stores, museums, and events. The AI model used for the sample application is [YOLOV3](https://arxiv.org/pdf/1804.02767.pdf). +### Supported Product +- RZ/V2L Evaluation Board Kit (RZ/V2L EVK) +- RZ/V2H Evaluation Board Kit (RZ/V2H EVK) + +### Input/Output + + + + + + + + + + + + + + + +
BoardInputOutput
RZ/V2L EVKMIPI cameraHDMI
RZ/V2H EVKUSB camera
+ + + + + + + + + + + + + + + +
I/ORZ/V2L EVKRZ/V2H EVK
InputMIPI cameraUSB camera +
OutputHDMI
+ + ## Application: Requirements -#### Hardware Requirements -- RZ/V2L Evaluation Board Kit - - RZ/V2L Board - - MIPI Camera Module (Google Coral Camera) - - USB cable -- HDMI monitor : Display the application. -- micro HDMI to HDMI cable : Connect HDMI monitor and RZ/V2L Board. -- SD Card : Used as filesystem. -- USB Hub : Used for connecting USB Mouse and USB Keyboard to the board. -- USB Mouse : Used for HDMI screen control. -- USB Keyboard : Used for terminal input. +### Hardware Requirements + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ForEquipmentDetails
RZ/V2LRZ/V2L EVKEvaluation Board Kit for RZ/V2L.
Includes followings. +
    +
  • + MIPI Camera Module(Google Coral Camera)
    + Used as a camera input source. +
  • +
  • MicroUSB to Serical Cable for serial communication.
  • +
+
AC AdapterUSB Power Delivery adapter for the board power supply.
MicroHDMI CableUsed to connect the HDMI Monitor and the board.
+ RZ/V2L EVK has microHDMI port.
RZ/V2HRZ/V2H EVKEvaluation Board Kit for RZ/V2H.
AC AdapterUSB Power Delivery adapter for the board power supply.
+ 100W is required.
HDMI CableUsed to connect the HDMI Monitor and the board.
+ RZ/V2H EVK has HDMI port.
USB CameraUsed as a camera input source.
CommonUSB Cable Type-CConnect AC adapter and the board.
HDMI MonitorUsed to display the graphics of the board.
microSD cardUsed as the filesystem.
+ Must have over 4GB capacity of blank space.
+ Operating Environment: Transcend UHS-I microSD 300S 16GB
Linux PCUsed to build application and setup microSD card.
+ Operating Environment: Ubuntu 20.04
SD card readerUsed for setting up microSD card.
USB HubUsed to connect USB Keyboard and USB Mouse to the board.
USB KeyboardUsed to type strings on the terminal of board.
USB MouseUsed to operate the mouse on the screen of board.
+ >**Note:** All external devices will be attached to the board and does not require any driver installation (Plug n Play Type) Connect the hardware as shown below. - +|RZ/V2L EVK | RZ/V2H EVK | +|:---|:---| +|| | -##### Notes -1. To see which port to be connected, refer to [Hardware Requirements and Setup](https://github.com/renesas-rz/rzv_ai_applications/tree/develop#hardware-requirements-and-setup). -2. When using the keyboard connected to RZ/V2L Evaluation Board, the keyboard layout and language are fixed to English. +>**Note 1:** When using the keyboard connected to RZ/V Evaluation Board, the keyboard layout and language are fixed to English. +**Note 2:** For RZ/V2H EVK, there are USB 2.0 and USB 3.0 ports. +USB camera needs to be connected to appropriate port based on its requirement. ## Application: Build Stage ->**Note:** User can skip to the next stage (deploy) if they don't want to build the application. All pre-built binaries are provided. +>**Note:** User can skip to the [next stage (deploy)](#application-deploy-stage) if they do not want to build the application. +All pre-built binaries are provided. -This project expects the user to have completed [Getting Startup Guide](https://renesas-rz.github.io/rzv_ai_sdk/getting_started.html) provided by Renesas. +### Prerequisites +This section expects the user to have completed Step 5 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html) provided by Renesas. After completion of the guide, the user is expected of following things. -- The board setup is done. -- SD card is prepared. -- The docker container of `rzv2l_ai_sdk_image` is running on the host machine. +- AI SDK setup is done. +- Following docker container is running on the host machine. + |Board | Docker container | + |:---|:---| + |RZ/V2L EVK|`rzv2l_ai_sdk_container` | + |RZ/V2H EVK|`rzv2h_ai_sdk_container` | ->**Note:** Docker environment is required for building the sample application. 
+ >**Note:** Docker environment is required for building the sample application. -#### Application File Generation +### Application File Generation 1. On your host machine, copy the repository from the GitHub to the desired location. - 1. It is recommended to copy/clone the repository on the `data` folder which is mounted on the `rzv2l_ai_sdk_container` docker container as shown below. + 1. It is recommended to copy/clone the repository on the `data` folder, which is mounted on the Docker container. ```sh cd /data git clone https://github.com/renesas-rz/rzv_ai_sdk.git ``` - > Note 1: Please verify the git repository url if error occurs + >Note: This command will download the whole repository, which include all other applications. + If you have already downloaded the repository of the same version, you may not need to run this command. - > Note 2: This command will download whole repository, which include all other applications, if you have already downloaded the repository of the same version, you may not need to run this command. - 2. Run (or start) the docker container and open the bash terminal on the container. -Here, we use the `rzv2l_ai_sdk_container` as the name of container created from `rzv2l_ai_sdk_image` docker image. +E.g., for RZ/V2L, use the `rzv2l_ai_sdk_container` as the name of container created from `rzv2l_ai_sdk_image` docker image. > Note that all the build steps/commands listed below are executed on the docker container bash terminal. 3. Set your clone directory to the environment variable. @@ -64,74 +185,126 @@ Here, we use the `rzv2l_ai_sdk_container` as the name of container created from ``` 3. Go to the application source code directory. ```sh - cd ${PROJECT_PATH}/R01_object_detection/src + cd ${PROJECT_PATH}/R01_object_detection/ ``` -4. Build the application by following the commands below. + |Board | `SRC_DIR` | + |:---|:---| + |RZ/V2L EVK|`src` | + |RZ/V2H EVK|`src_v2h` | + +4. Create and move to the `build` directory. 
```sh mkdir -p build && cd build - cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake .. - make -j$(nproc) - ``` -5. The following application file would be genarated in the `${PROJECT_PATH}/R01_object_detection/src/build` directory -- object_detection + `````` +4. Build the application by following the commands below. + - For RZ/V2L, + ```sh + cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake .. + make -j$(nproc) + ``` + - For RZ/V2H, + ```sh + cmake -DCMAKE_TOOLCHAIN_FILE=./toolchain/runtime.cmake -DV2H=ON .. + make -j$(nproc) + ``` +5. The following application file would be generated in the `${PROJECT_PATH}/R01_object_detection//build` directory + - object_detection ## Application: Deploy Stage -For the ease of deployment all the deployables file and folders for RZV2L are provided on the [exe](./exe) folder. +### Prerequisites +This section expects the user to have completed Step 7-1 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html#step7) provided by Renesas. +After completion of the guide, the user is expected of following things. +- microSD card setup is done. + +### File Configuration +For the ease of deployment all the deployables file and folders are provided in following folders. +|Board | `EXE_DIR` | +|:---|:---| +|RZ/V2L EVK|[exe_v2l](./exe_v2l) | +|RZ/V2H EVK|[exe_v2h](./exe_v2h) | + +Each folder contains following items. |File | Details | |:---|:---| |licecnses | License information of AI model.
Not necessary for running application. | |yolov3_onnx | Model object files for deployment.
Pre-processing Runtime Object files included. | +|yolov3_onnx_fhd | **For `exe_v2h` only.**
Required when using FHD camera input image.
See [Reference](#reference). | |coco-lables-2014_2017.txt | Label list for Object Detection. | |object_detection | application file. | -1. Follow the steps below to deploy the project on RZV2L Board. - 1. Run the commands below to download the `R01_object_detection_deploy_tvm-v111.so` from [Release v2.00](https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v2.00/) - ``` - cd ${PROJECT_PATH}/R01_object_detection/exe/yolov3_onnx - wget https://github.com/renesas-rz/rzv_ai_sdk/releases/download/v2.00/R01_object_detection_deploy_tvm-v111.so +### Instruction +1. Run following commands to download the necessary file. +Replace each variable according to your board. + ```sh + cd ${PROJECT_PATH}/R01_object_detection//yolov3_onnx + wget / ``` - 2. Rename the `R01_object_detection_deploy_tvm-v111.so` to `deploy.so`. + |Board | `EXE_DIR` |`SO_FILE` |`URL` | + |:---|:---|:---|:---| + |RZ/V2L EVK|[exe_v2l](./exe_v2l) |`R01_object_detection_deploy_tvm-v111.so` |[Release v2.00](https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v2.00/) | + |RZ/V2H EVK|[exe_v2h](./exe_v2h) |`R01_object_detection_deploy_tvm_v2h-v210.so` |[Release v3.00](https://github.com/renesas-rz/rzv_ai_sdk/releases/tag/v3.00/) | + + - E.g., for RZ/V2L EVK, use following commands. + ```sh + cd ${PROJECT_PATH}/R01_object_detection/exe_v2l/yolov3_onnx + wget https://github.com/renesas-rz/rzv_ai_sdk/releases/download/v2.00/R01_object_detection_deploy_tvm-v111.so + ``` +2. Rename the `R01_object_detection_deploy_*.so` to `deploy.so`. + ```sh + mv deploy.so ``` - mv R01_object_detection_deploy_tvm-v111.so deploy.so +3. Copy the following files to the `/home/root/tvm` directory of the rootfs (SD Card) for the board. + |File | Details | + |:---|:---| + |All files in `EXE_DIR` directory | Including `deploy.so` file. | + |`object_detection` application file | Generated the file according to [Application File Generation](#application-file-generation) | + +4. 
Check if `libtvm_runtime.so` exists under `/usr/lib64` directory of the rootfs (SD card) on the board. + +5. Folder structure in the rootfs (SD Card) would look like: + ```sh + ├── usr/ + │ └── lib64/ + │ └── libtvm_runtime.so + └── home/ + └── root/ + └── tvm/ + ├── yolov3_onnx/ + │ ├── preprocess + │ ├── deploy.json + │ ├── deploy.params + │ └── deploy.so + ├── coco-labels-2014_2017.txt + └── object_detection ``` - 3. Copy the following files to the `/home/root/tvm` directory of the rootfs (SD Card) for RZV2L Board. - - All files in [exe](./exe) directory. (Including `deploy.so` file.) - - `object_detection` application file if you generated the file according to [Application File Generation](#application-file-generation) - 4. Check if `libtvm_runtime.so` is there on `/usr/lib64` directory of the rootfs (SD card) on RZ/V2L board. - -2. Folder structure in the rootfs (SD Card) would look like: -```sh -├── usr/ -│ └── lib64/ -│ └── libtvm_runtime.so -└── home/ - └── root/ - └── tvm/ - ├── yolov3_onnx/ - │ ├── preprocess - │ ├── deploy.json - │ ├── deploy.params - │ └── deploy.so - ├── coco-labels-2014_2017.txt - └── object_detection -``` ->**Note:** The directory name could be anything instead of `tvm`. If you copy the whole `exe` folder on the board. You are not required to rename it `tvm`. +>**Note:** The directory name could be anything instead of `tvm`. If you copy the whole `EXE_DIR` folder on the board, you are not required to rename it `tvm`. ## Application: Run Stage -1. On RZ/V2L Board terminal, go to the `tvm` directory of the rootfs. -```sh -cd /home/root/tvm -``` + +### Prerequisites +This section expects the user to have completed Step 7-3 of [Getting Started Guide](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started.html#step7-3) provided by Renesas. + +After completion of the guide, the user is expected of following things. +- The board setup is done. +- The board is booted with microSD card, which contains the application file. 
+ +### Instruction +1. On Board terminal, go to the `tvm` directory of the rootfs. + ```sh + cd /home/root/tvm + ``` 2. Run the application. -```sh -./object_detection -``` + ```sh + ./object_detection + ``` 3. Following window shows up on HDMI screen. - + |RZ/V2L EVK | RZ/V2H EVK | + |:---|:---| + || | On application window, following information is displayed. - Camera capture @@ -139,10 +312,10 @@ cd /home/root/tvm - Processing time - Total AI Time: Sum of all processing time below. - Inference: Processing time taken for AI inference. - - PreProces: Processing time taken for AI pre-processing. + - PreProcess: Processing time taken for AI pre-processing. - PostProcess: Processing time taken for AI post-processing.
(excluding the time for drawing on HDMI screen). -4. To terninate the application, switch the application window to the terminal with using `Super(windows key)+Tab` and  press ENTER key on the terminal of RZ/V2L Board. +4. To terninate the application, switch the application window to the terminal by using `Super(windows key)+Tab` and press ENTER key on the terminal of the board. ## Application: Configuration @@ -155,15 +328,43 @@ Output2 size: 1x26x26x255 Output3 size: 1x52x52x255 ### AI inference time -The AI inference time is around 430 msec. +|Board | AI inference time| +|:---|:---| +|RZ/V2L EVK| Approximately 430ms | +|RZ/V2H EVK | Approximately 40ms | ### Processing |Processing | Details | |:---|:---| -|Pre-processing | Processed by DRP-AI. | +|Pre-processing | Processed by DRP-AI.
| |Inference | Processed by DRP-AI and CPU. | |Post-processing | Processed by CPU. | + +### Image buffer size + +|Board | Camera capture buffer size|HDMI output buffer size| +|:---|:---|:---| +|RZ/V2L EVK| VGA (640x480) in YUYV format | FHD (1920x1080) in BGRA format | +|RZ/V2H EVK | VGA (640x480) in YUYV format | FHD (1920x1080) in BGRA format | + +> **Note:** For RZ/V2H, this application allocates the DRP-AI input buffer with **640x640** resolution in order to maintain the same aspect ratio with **416x416** square size of YOLOv3 input shape after the resize pre-processing. + +Following is the buffer flow for RZ/V2H application. + + + + +## Reference +- For RZ/V2H EVK, this application supports USB camera only with 640x480 resolution. +FHD resolution is supported by e-CAM22_CURZH camera (MIPI). +Please refer to following URL for how to change camera input to MIPI camera. +[https://renesas-rz.github.io/rzv_ai_sdk/latest/about-applications](https://renesas-rz.github.io/rzv_ai_sdk/latest/about-applications#mipi). + ## License -For AI model, see `exe/licenses` directory. +For AI model, see following directory.. 
+|Board | AI Model | License directory| +|:---|:---|:---| +|RZ/V2L EVK| YOLOv3 | `exe_v2l/licenses` | +|RZ/V2H EVK | YOLOv3 | `exe_v2h/licenses` | \ No newline at end of file diff --git a/R01_object_detection/exe_v2h/coco-labels-2014_2017.txt b/R01_object_detection/exe_v2h/coco-labels-2014_2017.txt new file mode 100755 index 0000000..941cb4e --- /dev/null +++ b/R01_object_detection/exe_v2h/coco-labels-2014_2017.txt @@ -0,0 +1,80 @@ +person +bicycle +car +motorcycle +airplane +bus +train +truck +boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +couch +potted plant +bed +dining table +toilet +tv +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/R01_object_detection/exe/licenses/coco-labels/LICENSE.txt b/R01_object_detection/exe_v2h/licenses/coco-labels/LICENSE.txt similarity index 100% rename from R01_object_detection/exe/licenses/coco-labels/LICENSE.txt rename to R01_object_detection/exe_v2h/licenses/coco-labels/LICENSE.txt diff --git a/R01_object_detection/exe/licenses/onnx_tutorials/LICENSE.txt b/R01_object_detection/exe_v2h/licenses/onnx_tutorials/LICENSE.txt similarity index 100% rename from R01_object_detection/exe/licenses/onnx_tutorials/LICENSE.txt rename to R01_object_detection/exe_v2h/licenses/onnx_tutorials/LICENSE.txt diff --git a/R01_object_detection/exe/licenses/pytorch-yolov3/LICENSE.txt b/R01_object_detection/exe_v2h/licenses/pytorch-yolov3/LICENSE.txt similarity index 100% rename from R01_object_detection/exe/licenses/pytorch-yolov3/LICENSE.txt rename to 
R01_object_detection/exe_v2h/licenses/pytorch-yolov3/LICENSE.txt diff --git a/R01_object_detection/exe/licenses/pytorch/LICENSE.txt b/R01_object_detection/exe_v2h/licenses/pytorch/LICENSE.txt similarity index 100% rename from R01_object_detection/exe/licenses/pytorch/LICENSE.txt rename to R01_object_detection/exe_v2h/licenses/pytorch/LICENSE.txt diff --git a/R01_object_detection/exe/licenses/pytorch_vision/LICENSE.txt b/R01_object_detection/exe_v2h/licenses/pytorch_vision/LICENSE.txt similarity index 100% rename from R01_object_detection/exe/licenses/pytorch_vision/LICENSE.txt rename to R01_object_detection/exe_v2h/licenses/pytorch_vision/LICENSE.txt diff --git a/R01_object_detection/exe_v2h/object_detection b/R01_object_detection/exe_v2h/object_detection new file mode 100755 index 0000000..16de0b1 Binary files /dev/null and b/R01_object_detection/exe_v2h/object_detection differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/deploy.json b/R01_object_detection/exe_v2h/yolov3_onnx/deploy.json new file mode 100755 index 0000000..7d1ceb3 --- /dev/null +++ b/R01_object_detection/exe_v2h/yolov3_onnx/deploy.json @@ -0,0 +1,76 @@ +{ + "nodes": [ + { + "op": "null", + "name": "input1", + "inputs": [] + }, + { + "op": "tvm_op", + "name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "attrs": { + "flatten_data": "0", + "func_name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "hash": "8204a5bc4fae0685", + "num_outputs": "3", + "num_inputs": "1", + "global_symbol": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "Compiler": "mera_drp" + }, + "inputs": [ + [ + 0, + 0, + 0 + ] + ] + } + ], + "arg_nodes": [0], + "heads": [ + [ + 1, + 0, + 0 + ], + [ + 1, + 1, + 0 + ], + [ + 1, + 2, + 0 + ] + ], + "attrs": { + "dltype": [ + "list_str", + [ + "float32", + "float16", + "float16", + "float16" + ] + ], + "device_index": [ + "list_int", + [1, 1, 1, 1] + ], + "storage_id": [ + "list_int", + [0, 1, 2, 3] + ], + "shape": [ 
+ "list_shape", + [ + [1, 3, 416, 416], + [1, 255, 13, 13], + [1, 255, 26, 26], + [1, 255, 52, 52] + ] + ] + }, + "node_row_ptr": [0, 1, 4] +} \ No newline at end of file diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/deploy.params b/R01_object_detection/exe_v2h/yolov3_onnx/deploy.params new file mode 100755 index 0000000..1011def Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/deploy.params differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/addr_map.txt b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/addr_map.txt new file mode 100755 index 0000000..80eed1d --- /dev/null +++ b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/addr_map.txt @@ -0,0 +1,12 @@ +data_in 0 12c000 +data 12c000 279c00 +data_out 3a5c00 1fb000 +work 5a0c00 80 +weight 5a0c80 80 +drp_config 5a0d00 f3bc0 +aimac_param_cmd 6948c0 20 +aimac_param_desc 694900 10 +aimac_cmd 694940 180 +aimac_desc 694ac0 50 +drp_param 694b80 f0 +drp_desc 694c80 110 diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_cmd.bin b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_cmd.bin new file mode 100755 index 0000000..c49322b Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_cmd.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_desc.bin b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_desc.bin new file mode 100755 index 0000000..c094b76 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_desc.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_param_cmd.bin b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_param_cmd.bin new file mode 100755 index 0000000..f9869be Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_param_cmd.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_param_desc.bin 
b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_param_desc.bin new file mode 100755 index 0000000..fc9e2f9 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/aimac_param_desc.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_config.mem b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_config.mem new file mode 100755 index 0000000..5239a14 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_config.mem differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_desc.bin b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_desc.bin new file mode 100755 index 0000000..6d48660 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_desc.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_param.bin b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_param.bin new file mode 100755 index 0000000..cbeeea0 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_param.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_param_info.txt b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_param_info.txt new file mode 100755 index 0000000..1e3190a --- /dev/null +++ b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/drp_param_info.txt @@ -0,0 +1,110 @@ +OFFSET_ADD:0,layer_name:0_node_pre_0_0_resize_hwc_R_W,drp_lib:resize_hwc,prop:input,node:in, + Param:raddr, Value:0, offset:0, size:4, + Param:waddr, Value:1228800, offset:4, size:4, + Param:IMG_IWIDHT, Value:640, offset:8, size:2, + Param:IMG_IHEIGHT, Value:640, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:RFU1, Value:0, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:RFU2, Value:0, offset:22,size:2, + Param:OCH0_SYNCSET_DT, 
Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU3, Value:0, offset:42,size:2, + Param:RFU4, Value:0, offset:44,size:2, + Param:RFU5, Value:0, offset:46,size:2, + Param:RESIZE_ALG, Value:1, offset:48,size:2, + Param:DATA_TYPE, Value:0, offset:50,size:2, + Param:RFU7, Value:0, offset:52,size:2, + Param:RFU8, Value:0, offset:54,size:2, +OFFSET_ADD:56,layer_name:1_node_pre_0_1_imagescaler_R_W,drp_lib:imagescaler,prop:intermediate_data,node:, + Param:raddr, Value:1228800, offset:0, size:4, + Param:waddr, Value:1747968, offset:4, size:4, + Param:IMG_IWIDHT, Value:416, offset:8, size:2, + Param:IMG_IHEIGHT, Value:416, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:IMG_ICH_ORG, Value:3, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:IMG_OCH_ORG, Value:3, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU3, Value:0, offset:42,size:2, + Param:RFU4, Value:0, offset:44,size:2, + Param:RFU5, Value:0, offset:46,size:2, + Param:ADD_ADDR, Value:5901440, offset:48,size:4, + Param:MUL_ADDR, 
Value:5901504, offset:52,size:4, + Param:DIN_FORMAT, Value:0, offset:56,size:2, + Param:DOUT_RGB_ORDER, Value:1, offset:58,size:2, + Param:RFU6, Value:0, offset:60,size:2, + Param:RFU7, Value:0, offset:62,size:2, +OFFSET_ADD:120,layer_name:2_node_pre_0_3_transpose_R_W,drp_lib:transpose,prop:intermediate_data,node:, + Param:raddr, Value:1747968, offset:0, size:4, + Param:waddr, Value:2786304, offset:4, size:4, + Param:IMG_IWIDHT, Value:416, offset:8, size:2, + Param:IMG_IHEIGHT, Value:416, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:IMG_ICH_ORG, Value:3, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:IMG_OCH_ORG, Value:3, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU1, Value:0, offset:42,size:2, + Param:RFU2, Value:0, offset:44,size:2, + Param:RFU3, Value:0, offset:46,size:2, + Param:WORD_SIZE, Value:1, offset:48,size:2, + Param:IS_CHW2HWC, Value:0, offset:50,size:2, + Param:RFU4, Value:0, offset:52,size:2, + Param:RFU5, Value:0, offset:54,size:2, +OFFSET_ADD:176,layer_name:3_node_pre_0_4_cast_fp16_fp32_R_W,drp_lib:cast_fp16_fp32,prop:intermediate_data,node:, + Param:raddr, Value:2786304, offset:0, size:4, + Param:waddr, Value:3824640, offset:4, size:4, + Param:IMG_IWIDHT, Value:416, offset:8, size:2, + Param:IMG_IHEIGHT, Value:416, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:RFU1, Value:0, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, 
offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:RFU2, Value:0, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU1, Value:0, offset:42,size:2, + Param:RFU2, Value:0, offset:44,size:2, + Param:RFU3, Value:0, offset:46,size:2, + Param:CAST_MODE, Value:0, offset:48,size:2, + Param:RFU1, Value:0, offset:50,size:2, + Param:RFU2, Value:0, offset:52,size:2, + Param:RFU3, Value:0, offset:54,size:2, diff --git a/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/weight.bin b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/weight.bin new file mode 100755 index 0000000..494b5eb Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx/preprocess/weight.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/deploy.json b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/deploy.json new file mode 100755 index 0000000..7d1ceb3 --- /dev/null +++ b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/deploy.json @@ -0,0 +1,76 @@ +{ + "nodes": [ + { + "op": "null", + "name": "input1", + "inputs": [] + }, + { + "op": "tvm_op", + "name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "attrs": { + "flatten_data": "0", + "func_name": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "hash": "8204a5bc4fae0685", + "num_outputs": "3", + "num_inputs": "1", + "global_symbol": "tvmgen_default_tvmgen_default_tvmgen_default_mera_drp_main_0", + "Compiler": "mera_drp" + }, + "inputs": [ + [ + 0, + 0, + 0 + ] + ] + } + ], + 
"arg_nodes": [0], + "heads": [ + [ + 1, + 0, + 0 + ], + [ + 1, + 1, + 0 + ], + [ + 1, + 2, + 0 + ] + ], + "attrs": { + "dltype": [ + "list_str", + [ + "float32", + "float16", + "float16", + "float16" + ] + ], + "device_index": [ + "list_int", + [1, 1, 1, 1] + ], + "storage_id": [ + "list_int", + [0, 1, 2, 3] + ], + "shape": [ + "list_shape", + [ + [1, 3, 416, 416], + [1, 255, 13, 13], + [1, 255, 26, 26], + [1, 255, 52, 52] + ] + ] + }, + "node_row_ptr": [0, 1, 4] +} \ No newline at end of file diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/deploy.params b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/deploy.params new file mode 100755 index 0000000..1011def Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/deploy.params differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/addr_map.txt b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/addr_map.txt new file mode 100755 index 0000000..7888f92 --- /dev/null +++ b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/addr_map.txt @@ -0,0 +1,12 @@ +data_in 0 a8c000 +data a8c000 279c00 +data_out d05c00 1fb000 +work f00c00 80 +weight f00c80 80 +drp_config f00d00 f3bc0 +aimac_param_cmd ff48c0 20 +aimac_param_desc ff4900 10 +aimac_cmd ff4940 180 +aimac_desc ff4ac0 50 +drp_param ff4b80 f0 +drp_desc ff4c80 110 diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_cmd.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_cmd.bin new file mode 100755 index 0000000..c49322b Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_cmd.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_desc.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_desc.bin new file mode 100755 index 0000000..2ab30d2 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_desc.bin differ diff --git 
a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_param_cmd.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_param_cmd.bin new file mode 100755 index 0000000..f9869be Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_param_cmd.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_param_desc.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_param_desc.bin new file mode 100755 index 0000000..cdbbfc8 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/aimac_param_desc.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_config.mem b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_config.mem new file mode 100755 index 0000000..5239a14 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_config.mem differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_desc.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_desc.bin new file mode 100755 index 0000000..6d8938b Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_desc.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_param.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_param.bin new file mode 100755 index 0000000..b320f35 Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_param.bin differ diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_param_info.txt b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_param_info.txt new file mode 100755 index 0000000..cde5e87 --- /dev/null +++ b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/drp_param_info.txt @@ -0,0 +1,110 @@ +OFFSET_ADD:0,layer_name:0_node_pre_0_0_resize_hwc_R_W,drp_lib:resize_hwc,prop:input,node:in, + Param:raddr, Value:0, 
offset:0, size:4, + Param:waddr, Value:11059200, offset:4, size:4, + Param:IMG_IWIDHT, Value:1920, offset:8, size:2, + Param:IMG_IHEIGHT, Value:1920, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:RFU1, Value:0, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:RFU2, Value:0, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU3, Value:0, offset:42,size:2, + Param:RFU4, Value:0, offset:44,size:2, + Param:RFU5, Value:0, offset:46,size:2, + Param:RESIZE_ALG, Value:1, offset:48,size:2, + Param:DATA_TYPE, Value:0, offset:50,size:2, + Param:RFU7, Value:0, offset:52,size:2, + Param:RFU8, Value:0, offset:54,size:2, +OFFSET_ADD:56,layer_name:1_node_pre_0_1_imagescaler_R_W,drp_lib:imagescaler,prop:intermediate_data,node:, + Param:raddr, Value:11059200, offset:0, size:4, + Param:waddr, Value:11578368, offset:4, size:4, + Param:IMG_IWIDHT, Value:416, offset:8, size:2, + Param:IMG_IHEIGHT, Value:416, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:IMG_ICH_ORG, Value:3, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:IMG_OCH_ORG, Value:3, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, 
offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU3, Value:0, offset:42,size:2, + Param:RFU4, Value:0, offset:44,size:2, + Param:RFU5, Value:0, offset:46,size:2, + Param:ADD_ADDR, Value:15731840, offset:48,size:4, + Param:MUL_ADDR, Value:15731904, offset:52,size:4, + Param:DIN_FORMAT, Value:0, offset:56,size:2, + Param:DOUT_RGB_ORDER, Value:1, offset:58,size:2, + Param:RFU6, Value:0, offset:60,size:2, + Param:RFU7, Value:0, offset:62,size:2, +OFFSET_ADD:120,layer_name:2_node_pre_0_3_transpose_R_W,drp_lib:transpose,prop:intermediate_data,node:, + Param:raddr, Value:11578368, offset:0, size:4, + Param:waddr, Value:12616704, offset:4, size:4, + Param:IMG_IWIDHT, Value:416, offset:8, size:2, + Param:IMG_IHEIGHT, Value:416, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:IMG_ICH_ORG, Value:3, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:IMG_OCH_ORG, Value:3, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU1, Value:0, offset:42,size:2, + Param:RFU2, Value:0, offset:44,size:2, + Param:RFU3, Value:0, offset:46,size:2, + Param:WORD_SIZE, Value:1, offset:48,size:2, + Param:IS_CHW2HWC, Value:0, offset:50,size:2, + Param:RFU4, Value:0, offset:52,size:2, + 
Param:RFU5, Value:0, offset:54,size:2, +OFFSET_ADD:176,layer_name:3_node_pre_0_4_cast_fp16_fp32_R_W,drp_lib:cast_fp16_fp32,prop:intermediate_data,node:, + Param:raddr, Value:12616704, offset:0, size:4, + Param:waddr, Value:13655040, offset:4, size:4, + Param:IMG_IWIDHT, Value:416, offset:8, size:2, + Param:IMG_IHEIGHT, Value:416, offset:10,size:2, + Param:IMG_ICH, Value:3, offset:12,size:2, + Param:RFU1, Value:0, offset:14,size:2, + Param:IMG_OWIDTH, Value:416, offset:16,size:2, + Param:IMG_OHEIGHT, Value:416, offset:18,size:2, + Param:IMG_OCH, Value:3, offset:20,size:2, + Param:RFU2, Value:0, offset:22,size:2, + Param:OCH0_SYNCSET_DT, Value:1, offset:24,size:2, + Param:OCH0_SYNCSET_ID, Value:12, offset:26,size:2, + Param:OCH1_SYNCSET_DT, Value:2, offset:28,size:2, + Param:OCH1_SYNCSET_ID, Value:12, offset:30,size:2, + Param:OCH2_SYNCSET_DT, Value:4, offset:32,size:2, + Param:OCH2_SYNCSET_ID, Value:12, offset:34,size:2, + Param:OCH3_SYNCSET_DT, Value:24, offset:36,size:2, + Param:OCH3_SYNCSET_ID, Value:12, offset:38,size:2, + Param:INT_DISABLE, Value:1, offset:40,size:2, + Param:RFU1, Value:0, offset:42,size:2, + Param:RFU2, Value:0, offset:44,size:2, + Param:RFU3, Value:0, offset:46,size:2, + Param:CAST_MODE, Value:0, offset:48,size:2, + Param:RFU1, Value:0, offset:50,size:2, + Param:RFU2, Value:0, offset:52,size:2, + Param:RFU3, Value:0, offset:54,size:2, diff --git a/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/weight.bin b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/weight.bin new file mode 100755 index 0000000..494b5eb Binary files /dev/null and b/R01_object_detection/exe_v2h/yolov3_onnx_fhd/preprocess/weight.bin differ diff --git a/R01_object_detection/exe_v2l/coco-labels-2014_2017.txt b/R01_object_detection/exe_v2l/coco-labels-2014_2017.txt new file mode 100755 index 0000000..941cb4e --- /dev/null +++ b/R01_object_detection/exe_v2l/coco-labels-2014_2017.txt @@ -0,0 +1,80 @@ +person +bicycle +car +motorcycle +airplane +bus +train +truck 
+boat +traffic light +fire hydrant +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +backpack +umbrella +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +couch +potted plant +bed +dining table +toilet +tv +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/R01_object_detection/exe_v2l/licenses/coco-labels/LICENSE.txt b/R01_object_detection/exe_v2l/licenses/coco-labels/LICENSE.txt new file mode 100755 index 0000000..45127cf --- /dev/null +++ b/R01_object_detection/exe_v2l/licenses/coco-labels/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Amikelive + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/R01_object_detection/exe_v2l/licenses/onnx_tutorials/LICENSE.txt b/R01_object_detection/exe_v2l/licenses/onnx_tutorials/LICENSE.txt new file mode 100755 index 0000000..25e527d --- /dev/null +++ b/R01_object_detection/exe_v2l/licenses/onnx_tutorials/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/R01_object_detection/exe_v2l/licenses/pytorch-yolov3/LICENSE.txt b/R01_object_detection/exe_v2l/licenses/pytorch-yolov3/LICENSE.txt new file mode 100755 index 0000000..001b337 --- /dev/null +++ b/R01_object_detection/exe_v2l/licenses/pytorch-yolov3/LICENSE.txt @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year>  <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program>  Copyright (C) <year>  <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
\ No newline at end of file diff --git a/R01_object_detection/exe_v2l/licenses/pytorch/LICENSE.txt b/R01_object_detection/exe_v2l/licenses/pytorch/LICENSE.txt new file mode 100755 index 0000000..7a5e09c --- /dev/null +++ b/R01_object_detection/exe_v2l/licenses/pytorch/LICENSE.txt @@ -0,0 +1,77 @@ +From PyTorch: + +Copyright (c) 2016- Facebook, Inc (Adam Paszke) +Copyright (c) 2014- Facebook, Inc (Soumith Chintala) +Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) +Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) +Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) +Copyright (c) 2011-2013 NYU (Clement Farabet) +Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) +Copyright (c) 2006 Idiap Research Institute (Samy Bengio) +Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) + +From Caffe2: + +Copyright (c) 2016-present, Facebook Inc. All rights reserved. + +All contributions by Facebook: +Copyright (c) 2016 Facebook Inc. + +All contributions by Google: +Copyright (c) 2015 Google Inc. +All rights reserved. + +All contributions by Yangqing Jia: +Copyright (c) 2015 Yangqing Jia +All rights reserved. + +All contributions by Kakao Brain: +Copyright 2019-2020 Kakao Brain + +All contributions by Cruise LLC: +Copyright (c) 2022 Cruise LLC. +All rights reserved. + +All contributions from Caffe: +Copyright(c) 2013, 2014, 2015, the respective contributors +All rights reserved. + +All other contributions: +Copyright(c) 2015, 2016 the respective contributors +All rights reserved. + +Caffe2 uses a copyright model similar to Caffe: each contributor holds +copyright over their contributions to Caffe2. The project versioning records +all such contribution and copyright details. 
If a contributor wants to further +mark their specific copyright on a particular contribution, they should +indicate their copyright solely in the commit message of the change when it is +committed. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America + and IDIAP Research Institute nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/R01_object_detection/exe_v2l/licenses/pytorch_vision/LICENSE.txt b/R01_object_detection/exe_v2l/licenses/pytorch_vision/LICENSE.txt new file mode 100755 index 0000000..f9ce6ae --- /dev/null +++ b/R01_object_detection/exe_v2l/licenses/pytorch_vision/LICENSE.txt @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) Soumith Chintala 2016, +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/R01_object_detection/exe/object_detection b/R01_object_detection/exe_v2l/object_detection similarity index 100% rename from R01_object_detection/exe/object_detection rename to R01_object_detection/exe_v2l/object_detection diff --git a/R01_object_detection/exe/yolov3_onnx/deploy.json b/R01_object_detection/exe_v2l/yolov3_onnx/deploy.json similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/deploy.json rename to R01_object_detection/exe_v2l/yolov3_onnx/deploy.json diff --git a/R01_object_detection/exe_v2l/yolov3_onnx/deploy.params b/R01_object_detection/exe_v2l/yolov3_onnx/deploy.params new file mode 100755 index 0000000..1011def Binary files /dev/null and b/R01_object_detection/exe_v2l/yolov3_onnx/deploy.params differ diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/aimac_desc.bin b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/aimac_desc.bin similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/aimac_desc.bin rename to R01_object_detection/exe_v2l/yolov3_onnx/preprocess/aimac_desc.bin diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/drp_desc.bin b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/drp_desc.bin similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/drp_desc.bin rename to R01_object_detection/exe_v2l/yolov3_onnx/preprocess/drp_desc.bin diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/drp_param.bin b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/drp_param.bin similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/drp_param.bin rename to R01_object_detection/exe_v2l/yolov3_onnx/preprocess/drp_param.bin diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/drp_param_info.txt b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/drp_param_info.txt similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/drp_param_info.txt rename to 
R01_object_detection/exe_v2l/yolov3_onnx/preprocess/drp_param_info.txt diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/pp_addrmap_intm.txt b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/pp_addrmap_intm.txt similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/pp_addrmap_intm.txt rename to R01_object_detection/exe_v2l/yolov3_onnx/preprocess/pp_addrmap_intm.txt diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/pp_drpcfg.mem b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/pp_drpcfg.mem similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/pp_drpcfg.mem rename to R01_object_detection/exe_v2l/yolov3_onnx/preprocess/pp_drpcfg.mem diff --git a/R01_object_detection/exe/yolov3_onnx/preprocess/pp_weight.dat b/R01_object_detection/exe_v2l/yolov3_onnx/preprocess/pp_weight.dat similarity index 100% rename from R01_object_detection/exe/yolov3_onnx/preprocess/pp_weight.dat rename to R01_object_detection/exe_v2l/yolov3_onnx/preprocess/pp_weight.dat diff --git a/R01_object_detection/img/buffer_flow.png b/R01_object_detection/img/buffer_flow.png new file mode 100755 index 0000000..a773c2a Binary files /dev/null and b/R01_object_detection/img/buffer_flow.png differ diff --git a/R01_object_detection/img/hw_conf.jpg b/R01_object_detection/img/hw_conf.jpg deleted file mode 100755 index 3b04781..0000000 Binary files a/R01_object_detection/img/hw_conf.jpg and /dev/null differ diff --git a/R01_object_detection/img/hw_conf_v2h.png b/R01_object_detection/img/hw_conf_v2h.png new file mode 100755 index 0000000..7b3d546 Binary files /dev/null and b/R01_object_detection/img/hw_conf_v2h.png differ diff --git a/R01_object_detection/img/hw_conf_v2l.png b/R01_object_detection/img/hw_conf_v2l.png new file mode 100755 index 0000000..0005ea3 Binary files /dev/null and b/R01_object_detection/img/hw_conf_v2l.png differ diff --git a/R01_object_detection/img/objectdetection_v2h.png 
b/R01_object_detection/img/objectdetection_v2h.png new file mode 100755 index 0000000..0bd9807 Binary files /dev/null and b/R01_object_detection/img/objectdetection_v2h.png differ diff --git a/R01_object_detection/src/MeraDrpRuntimeWrapper.cpp b/R01_object_detection/src/MeraDrpRuntimeWrapper.cpp index b0fa17b..79d3085 100755 --- a/R01_object_detection/src/MeraDrpRuntimeWrapper.cpp +++ b/R01_object_detection/src/MeraDrpRuntimeWrapper.cpp @@ -1,6 +1,10 @@ /* * Original Code (C) Copyright Edgecortix, Inc. 2022 +<<<<<<< HEAD * Modified Code (C) Copyright Renesas Electronics Corporation 2023  +======= + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 +>>>>>>> origin/object_detection * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file diff --git a/R01_object_detection/src/MeraDrpRuntimeWrapper.h b/R01_object_detection/src/MeraDrpRuntimeWrapper.h index 416ff5b..5cd5700 100755 --- a/R01_object_detection/src/MeraDrpRuntimeWrapper.h +++ b/R01_object_detection/src/MeraDrpRuntimeWrapper.h @@ -1,6 +1,10 @@ /* * Original Code (C) Copyright Edgecortix, Inc. 2022 +<<<<<<< HEAD * Modified Code (C) Copyright Renesas Electronics Corporation 2023  +======= + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 +>>>>>>> origin/object_detection * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file diff --git a/R01_object_detection/src/PreRuntime.cpp b/R01_object_detection/src/PreRuntime.cpp index e241507..c2aea81 100755 --- a/R01_object_detection/src/PreRuntime.cpp +++ b/R01_object_detection/src/PreRuntime.cpp @@ -1,6 +1,6 @@ /* * Original Code (C) Copyright Renesas Electronics Corporation 2023 - *  + * * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. 
* * Licensed to the Apache Software Foundation (ASF) under one diff --git a/R01_object_detection/src/PreRuntime.h b/R01_object_detection/src/PreRuntime.h index b1736b8..50c5818 100755 --- a/R01_object_detection/src/PreRuntime.h +++ b/R01_object_detection/src/PreRuntime.h @@ -1,6 +1,6 @@ /* * Original Code (C) Copyright Renesas Electronics Corporation 2023 - *  + * * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. * * Licensed to the Apache Software Foundation (ASF) under one diff --git a/R01_object_detection/src/define.h b/R01_object_detection/src/define.h index 44e9ba7..bd1b9b0 100755 --- a/R01_object_detection/src/define.h +++ b/R01_object_detection/src/define.h @@ -1,7 +1,7 @@ /* * Original Code (C) Copyright Edgecortix, Inc. 2022 * Modified Code (C) Copyright Renesas Electronics Corporation 2023 - *  + * * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. * * Licensed to the Apache Software Foundation (ASF) under one diff --git a/R01_object_detection/src/main.cpp b/R01_object_detection/src/main.cpp index deb6113..784ece1 100755 --- a/R01_object_detection/src/main.cpp +++ b/R01_object_detection/src/main.cpp @@ -1,7 +1,7 @@ /* * Original Code (C) Copyright Edgecortix, Inc. 2022 * Modified Code (C) Copyright Renesas Electronics Corporation 2023 - *  + * * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. 
* * Licensed to the Apache Software Foundation (ASF) under one diff --git a/R01_object_detection/src_v2h/CMakeLists.txt b/R01_object_detection/src_v2h/CMakeLists.txt new file mode 100755 index 0000000..8178806 --- /dev/null +++ b/R01_object_detection/src_v2h/CMakeLists.txt @@ -0,0 +1,38 @@ +cmake_minimum_required(VERSION 3.12) +project(object_detection_cpp) + +set(CMAKE_CXX_STANDARD 17) + +set(TVM_ROOT $ENV{TVM_HOME}) +include_directories(${TVM_ROOT}/include) +include_directories(${TVM_ROOT}/3rdparty/dlpack/include) +include_directories(${TVM_ROOT}/3rdparty/dmlc-core/include) +include_directories(${TVM_ROOT}/3rdparty/compiler-rt) + +set(TVM_RUNTIME_LIB ${TVM_ROOT}/build_runtime/libtvm_runtime.so) +set(EXE_NAME object_detection) + +file(GLOB SOURCE *.cpp *.h) +add_executable (${EXE_NAME} +${SOURCE} +) + +TARGET_LINK_LIBRARIES(${EXE_NAME} pthread) +TARGET_LINK_LIBRARIES(${EXE_NAME} jpeg) +TARGET_LINK_LIBRARIES(${EXE_NAME} wayland-client wayland-cursor wayland-egl) +target_link_libraries(${EXE_NAME} mmngr mmngrbuf) +target_link_libraries(${EXE_NAME} + opencv_imgcodecs opencv_imgproc opencv_core opencv_highgui + EGL GLESv2 ) + +find_package(OpenCV REQUIRED) +if(OpenCV_FOUND) + target_include_directories(${EXE_NAME} PUBLIC ${OpenCV_INCLUDE_DIRS}) + target_link_libraries(${EXE_NAME} ${OpenCV_LIBS}) +endif() + +target_link_libraries(${EXE_NAME} ${TVM_RUNTIME_LIB}) + +if(V2H) + target_compile_definitions(${EXE_NAME} PRIVATE V2H) +endif() diff --git a/R01_object_detection/src_v2h/MeraDrpRuntimeWrapper.cpp b/R01_object_detection/src_v2h/MeraDrpRuntimeWrapper.cpp new file mode 100755 index 0000000..ff005a8 --- /dev/null +++ b/R01_object_detection/src_v2h/MeraDrpRuntimeWrapper.cpp @@ -0,0 +1,208 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +#include +#include +#include +#include + +#include +#include +#include +#include "MeraDrpRuntimeWrapper.h" + +template +static std::vector LoadBinary(const std::string& bin_file) { + std::ifstream file(bin_file.c_str(), std::ios::in | std::ios::binary); + if (!file.is_open()) { + LOG(FATAL) << "unable to open file " + bin_file; + } + + file.seekg(0, file.end); + const uint32_t file_size = static_cast(file.tellg()); + file.seekg(0, file.beg); + + const auto file_buffer = std::unique_ptr(new char[file_size]); + file.read(file_buffer.get(), file_size); + + if (file.bad() || file.fail()) { + LOG(FATAL) << "error occured while reading the file"; + } + + file.close(); + + auto ptr = reinterpret_cast(file_buffer.get()); + const auto num_elements = file_size / sizeof(T); + return std::vector(ptr, ptr + num_elements); +} + +MeraDrpRuntimeWrapper::MeraDrpRuntimeWrapper() { + //device_type = kDLCPU; + device_type = kDLDrpAi; + device_id = 0; +}; + +MeraDrpRuntimeWrapper::~MeraDrpRuntimeWrapper() = default; + +bool MeraDrpRuntimeWrapper::LoadModel(const std::string& model_dir, uint32_t start_address){ + device_type = kDLCPU; + + return LoadModel(model_dir, (uint64_t)start_address); +} + +bool MeraDrpRuntimeWrapper::LoadModel(const std::string& model_dir, uint64_t start_address 
= 0x00) { + LOG(INFO) << "Loading json data..."; + const std::string json_file(model_dir + "/deploy.json"); + std::ifstream json_in(json_file.c_str(), std::ios::in); + std::string json_data((std::istreambuf_iterator(json_in)), std::istreambuf_iterator()); + json_in.close(); + + #if 0 + if(json_data.find("drp") == json_data.npos && device_type != kDLCPU){ + LOG(INFO) <<"Break! this model is Not for DRP-AI retry as CPU Only"; + return false; + } + #else + if(json_data.find("drp") == json_data.npos && device_type != kDLCPU){ + LOG(INFO) <<"try as CPU Only"; + device_type = kDLCPU; + } + #endif + + LOG(INFO) << "Loading runtime module..."; + tvm::runtime::Module mod_syslib = tvm::runtime::Module::LoadFromFile(model_dir + "/deploy.so"); + mod = (*tvm::runtime::Registry::Get("tvm.graph_executor_debug.create"))( + json_data, mod_syslib, device_type, device_id); + + LOG(INFO) << "Loading parameters..."; + tvm::runtime::PackedFunc load_params = mod.GetFunction("load_params"); + auto params_data = LoadBinary(model_dir + "/deploy.params"); + TVMByteArray params_arr; + params_arr.data = params_data.data(); + params_arr.size = params_data.size(); + load_params(params_arr); + tvm::runtime::PackedFunc set_start_address = mod.GetFunction("set_start_address"); + if(set_start_address != nullptr){ + set_start_address(start_address); + } + return true; +} + +template +void MeraDrpRuntimeWrapper::SetInput(int input_index, const T* data_ptr) { + LOG(INFO) << "Loading input..."; + + tvm::runtime::PackedFunc get_input = mod.GetFunction("get_input"); + tvm::runtime::NDArray xx = get_input(input_index); + auto in_shape = xx.Shape(); + int64_t in_size = 1; + for (unsigned long i = 0; i < in_shape.size(); ++i) { + in_size *= in_shape[i]; + } + + DLDevice ctx; + ctx.device_id = device_id; + ctx.device_type = DLDeviceType(device_type); + + auto input_array = tvm::runtime::NDArray::Empty(in_shape, xx.DataType(), ctx); + auto input_data = (T*)(input_array->data); + std::memcpy(input_data, 
data_ptr, sizeof(T) * in_size); + tvm::runtime::PackedFunc set_input = mod.GetFunction("set_input"); + set_input(input_index, input_array); +} +template void MeraDrpRuntimeWrapper::SetInput(int input_index, const float*); +template void MeraDrpRuntimeWrapper::SetInput(int input_index, const unsigned short*); + +void MeraDrpRuntimeWrapper::Run() { + mod.GetFunction("run")(); +} + +void MeraDrpRuntimeWrapper::ProfileRun(const std::string& profile_table, const std::string& profile_csv) { + tvm::runtime::PackedFunc profile = mod.GetFunction("profile"); + tvm::runtime::Array collectors; + tvm::runtime::profiling::Report report = profile(collectors); + + std::string rep_table = report->AsTable(); + std::ofstream ofs_table (profile_table, std::ofstream::out); + ofs_table << rep_table << std::endl; + ofs_table.close(); + + std::string rep_csv = report->AsCSV(); + std::ofstream ofs_csv (profile_csv, std::ofstream::out); + ofs_csv << rep_csv << std::endl; + ofs_csv.close(); +} + +int MeraDrpRuntimeWrapper::GetNumInput(std::string model_dir) { + // TVM does not support api to get number input of model. 
+ // This function calculate input number base on convention + // of input data name (input_xyz.bin) + DIR *dir; + dirent *diread; + int num_input = 0; + if ((dir = opendir(model_dir.c_str())) != nullptr) { + while ((diread = readdir(dir)) != nullptr) { + std::string file_name(diread->d_name); + if (std::regex_match(file_name, std::regex("(input_)(.*)(bin)") )) { + num_input++; + } + } + closedir(dir); + } else { + LOG(FATAL) << "Can not open model dir : " << model_dir; + } + + return num_input; +} + +InOutDataType MeraDrpRuntimeWrapper::GetInputDataType(int index) { + tvm::runtime::PackedFunc get_input = mod.GetFunction("get_input"); + tvm::runtime::NDArray input_info = get_input(index); + InOutDataType data_type = InOutDataType::OTHER; + if (input_info.DataType().is_float() && input_info.DataType().bits() == 32) { + data_type = InOutDataType::FLOAT32; + } else if (input_info.DataType().is_float() && input_info.DataType().bits() == 16) { + data_type = InOutDataType::FLOAT16; + } + return data_type; + } + +int MeraDrpRuntimeWrapper::GetNumOutput() { + return mod.GetFunction("get_num_outputs")(); + } + +std::tuple MeraDrpRuntimeWrapper::GetOutput(int index) { + tvm::runtime::PackedFunc get_output = mod.GetFunction("get_output"); + tvm::runtime::NDArray out = get_output(index); + int64_t out_size = 1; + for ( unsigned long i = 0; i < out.Shape().size(); ++i) { + out_size *= out.Shape()[i]; + } + + InOutDataType data_type = InOutDataType::OTHER; + if (out.DataType().is_float() && out.DataType().bits() == 32) { + data_type = InOutDataType::FLOAT32; + } else if (out.DataType().is_float() && out.DataType().bits() == 16) { + data_type = InOutDataType::FLOAT16; + } + return std::make_tuple(data_type, reinterpret_cast(out->data), out_size); +} diff --git a/R01_object_detection/src_v2h/MeraDrpRuntimeWrapper.h b/R01_object_detection/src_v2h/MeraDrpRuntimeWrapper.h new file mode 100755 index 0000000..5f1b2be --- /dev/null +++ 
b/R01_object_detection/src_v2h/MeraDrpRuntimeWrapper.h @@ -0,0 +1,52 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * +*/ +#include + +enum class InOutDataType { + FLOAT32, + FLOAT16, + OTHER +}; + +class MeraDrpRuntimeWrapper { + public: + MeraDrpRuntimeWrapper(); + ~MeraDrpRuntimeWrapper(); + + bool LoadModel(const std::string& model_dir, uint32_t start_address); + bool LoadModel(const std::string& model_dir, uint64_t start_address); + template + void SetInput(int input_index, const T* data_ptr); + void Run(); + void ProfileRun(const std::string& profile_table, const std::string& profile_csv); + int GetNumInput(std::string model_dir); + InOutDataType GetInputDataType(int index); + int GetNumOutput(); + + std::tuple GetOutput(int index); + + private: + int device_type; + int device_id; + tvm::runtime::Module mod; +}; diff --git a/R01_object_detection/src_v2h/PreRuntime.h b/R01_object_detection/src_v2h/PreRuntime.h new file mode 100755 index 0000000..ecd4e40 --- /dev/null +++ b/R01_object_detection/src_v2h/PreRuntime.h @@ -0,0 +1,473 @@ +/* + * Original Code (C) Copyright Renesas Electronics Corporation 2023 + * + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + +/*********************************************************************************************************************** +* File Name : PreRuntime.h +* Version : 1.1.0 +* Description : PreRuntime Header file +***********************************************************************************************************************/ +#pragma once + +#ifndef PRERUNTIME_H +#define PRERUNTIME_H +/*********************************************************************************************************************** +* Include +***********************************************************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +/*********************************************************************************************************************** +* Macro +***********************************************************************************************************************/ +#define BUF_SIZE (1024) +#define NUM_OBJ_FILE (6) +#define INDEX_I (0) +#define INDEX_D (1) +#define INDEX_C (2) +#define INDEX_P (3) +#define INDEX_A (4) +#define INDEX_W (5) +#define DRPAI_TIMEOUT (5) + +/*Uncomment to enable displaying the debug console log*/ +// #define DEBUG_LOG + +/*Error List*/ +#define PRE_SUCCESS (0) +#define PRE_ERROR (1) +#define PRE_ERROR_UI (-1) + +/* Library Name */ +#define LIB_CONVYUV2RGB ("conv_yuv2rgb") +#define LIB_RESIZE_HWC ("resize_hwc") +#define LIB_IMAGESCALER ("imagescaler") +#define LIB_TRANSPOSE ("transpose") +#define LIB_CASTFP16_FP32 ("cast_fp16_fp32") +#define LIB_CONVX2GRAY ("conv_x2gray") +#define LIB_CROP ("crop") +#define LIB_ARGMINMAX ("argminmax") + +/* Param Info ID */ +#define OP_HEAD ("OFFSET_ADD:") +#define OP_LAYER_NAME ("layer_name:") +#define OP_LIB 
("drp_lib:") +#define PRAM_HEAD ("Param:") +#define PARAM_VALUE ("Value:") +#define PARAM_OFFSET ("offset:") +#define PARAM_SIZE ("size:") + +/* Param name */ +#define P_RADDR ("raddr") +#define P_WADDR ("waddr") +#define P_IMG_IWIDTH ("IMG_IWIDHT") +#define P_IMG_IHEIGHT ("IMG_IHEIGHT") +#define P_IMG_OWIDTH ("IMG_OWIDTH") +#define P_IMG_OHEIGHT ("IMG_OHEIGHT") +#define P_INPUT_YUV_FORMAT ("INPUT_YUV_FORMAT") +#define P_DOUT_RGB_FORMAT ("DOUT_RGB_FORMAT") +#define P_RESIZE_ALG ("RESIZE_ALG") +#define P_DATA_TYPE ("DATA_TYPE") +#define P_ADD_ADDR ("ADD_ADDR") +#define P_MUL_ADDR ("MUL_ADDR") +#define P_DOUT_RGB_ORDER ("DOUT_RGB_ORDER") +#define P_WORD_SIZE ("WORD_SIZE") +#define P_IS_CHW2HWC ("IS_CHW2HWC") +#define P_CAST_MODE ("CAST_MODE") +#define P_CROP_POS_X ("CROP_POS_X") +#define P_CROP_POS_Y ("CROP_POS_Y") +#define P_DIN_FORMAT ("DIN_FORMAT") +#define P_DOUT_RGB_FORMAT ("DOUT_RGB_FORMAT") +#define P_IMG_ICH ("IMG_ICH") +#define P_IMG_OCH ("IMG_OCH") + +/* Other related values */ +#define FORMAT_YUYV_422 (0x0000) +#define FORMAT_YVYU_422 (0x0001) +#define FORMAT_UYUV_422 (0x0002) +#define FORMAT_VUYY_422 (0x0003) +#define FORMAT_YUYV_420 (0x1000) +#define FORMAT_UYVY_420 (0x1001) +#define FORMAT_YV12_420 (0x1002) +#define FORMAT_IYUV_420 (0x1003) +#define FORMAT_NV12_420 (0x1004) +#define FORMAT_NV21_420 (0x1005) +#define FORMAT_IMC1_420 (0x1006) +#define FORMAT_IMC2_420 (0x1007) +#define FORMAT_IMC3_420 (0x1008) +#define FORMAT_IMC4_420 (0x1009) +#define FORMAT_GRAY (0xFFFC) +#define FORMAT_BGR (0xFFFD) +#define FORMAT_RGB (0xFFFE) +#define FORMAT_UNKNOWN (0xFFFF) + +/* Format in string. 
Only used when DEBUG_LOG is ON */ +#define FORMAT_YUYV_422_STR ("YUYV_422") +#define FORMAT_YVYU_422_STR ("YVYU_422") +#define FORMAT_UYUV_422_STR ("UYUV_422") +#define FORMAT_VUYY_422_STR ("VUYY_422") +#define FORMAT_YUYV_420_STR ("YVYU_420") +#define FORMAT_UYVY_420_STR ("UYVY_420") +#define FORMAT_YV12_420_STR ("YV12_420") +#define FORMAT_IYUV_420_STR ("IYUV_420") +#define FORMAT_NV12_420_STR ("NV12_420") +#define FORMAT_NV21_420_STR ("NV21_420") +#define FORMAT_IMC1_420_STR ("IMC1_420") +#define FORMAT_IMC2_420_STR ("IMC2_420") +#define FORMAT_IMC3_420_STR ("IMC3_420") +#define FORMAT_IMC4_420_STR ("IMC4_420") +#define FORMAT_GRAY_STR ("GRAY") +#define FORMAT_BGR_STR ("BGR") +#define FORMAT_RGB_STR ("RGB") +#define FORMAT_UNKNOWN_STR ("UNKNOWN") +/* Format in string. Only used when DEBUG_LOG is ON */ +static const std::unordered_map format_string_table = +{ + {FORMAT_YUYV_422, FORMAT_YUYV_422_STR}, + {FORMAT_YVYU_422 , FORMAT_YVYU_422_STR}, + {FORMAT_UYUV_422 , FORMAT_UYUV_422_STR}, + {FORMAT_VUYY_422 , FORMAT_VUYY_422_STR}, + {FORMAT_YUYV_420 , FORMAT_YUYV_420_STR}, + {FORMAT_UYVY_420 , FORMAT_UYVY_420_STR}, + {FORMAT_YV12_420 , FORMAT_YV12_420_STR}, + {FORMAT_IYUV_420 , FORMAT_IYUV_420_STR}, + {FORMAT_NV12_420 , FORMAT_NV12_420_STR}, + {FORMAT_NV21_420 , FORMAT_NV21_420_STR}, + {FORMAT_IMC1_420 , FORMAT_IMC1_420_STR}, + {FORMAT_IMC2_420 , FORMAT_IMC2_420_STR}, + {FORMAT_IMC3_420 , FORMAT_IMC3_420_STR}, + {FORMAT_IMC4_420 , FORMAT_IMC4_420_STR}, + {FORMAT_GRAY , FORMAT_GRAY_STR}, + {FORMAT_BGR , FORMAT_BGR_STR}, + {FORMAT_RGB , FORMAT_RGB_STR}, + {FORMAT_UNKNOWN , FORMAT_UNKNOWN_STR} +}; + +/*If FORMAT_* >> BIT_YUV is 1, YUV420. + If 0, YUV422. 
+ >1 otherwise.*/ +#define BIT_YUV (12) + +#define DIN_FORMAT_RGB (0x1000) +#define DIN_FORMAT_BGR (0x1001) + +#define NUM_C_YUV (2) +#define NUM_C_RGB_BGR (3) +#define NUM_C_GRAY (1) + +#define ALG_NEAREST (0) +#define ALG_BILINEAR (1) +#define INVALID_ADDR (0xFFFFFFFF) +#define INVALID_SHAPE (0xFFFF) +#define INVALID_FORMAT (FORMAT_UNKNOWN) +#define INVALID_RESIZE_ALG (0xFF) + +#define MIN_INPUT_W_BOUND (0) +#define MIN_INPUT_H_BOUND (0) +#define MIN_RESIZE_W_BOUND (2) +#define MIN_RESIZE_H_BOUND (2) +#define MAX_RESIZE_W_BOUND (4096) +#define MAX_RESIZE_H_BOUND (4096) +#define MIN_CROP_W_BOUND (0) +#define MIN_CROP_H_BOUND (0) + +#define MODE_PRE (0) +#define MODE_POST (1) +/*********************************************************************************************************************** +* Struct and related function +***********************************************************************************************************************/ + +/* For dynamic allocation support of DRP-AI Object files */ +typedef struct +{ + std::string directory_name; + uint64_t start_address; + unsigned long object_files_size; + unsigned long data_in_addr; + unsigned long data_in_size; + unsigned long data_out_addr; + unsigned long data_out_size; +} st_drpai_data_t; + +typedef struct +{ + unsigned long desc_aimac_addr; + unsigned long desc_aimac_size; + unsigned long desc_drp_addr; + unsigned long desc_drp_size; + unsigned long drp_param_addr; + unsigned long drp_param_size; + unsigned long data_in_addr; + unsigned long data_in_size; + unsigned long data_addr; + unsigned long data_size; + unsigned long work_addr; + unsigned long work_size; + unsigned long data_out_addr; + unsigned long data_out_size; + unsigned long drp_config_addr; + unsigned long drp_config_size; + unsigned long weight_addr; + unsigned long weight_size; + unsigned long aimac_param_cmd_addr; + unsigned long aimac_param_cmd_size; + unsigned long aimac_param_desc_addr; + unsigned long aimac_param_desc_size; + 
unsigned long aimac_cmd_addr; + unsigned long aimac_cmd_size; +} st_addr_info_t; + + +typedef struct +{ + int drpai_fd = -1; + st_drpai_data_t data_inout; + st_addr_info_t drpai_address; +} drpai_handle_t; + +typedef struct +{ + uint16_t pre_in_shape_w = INVALID_SHAPE; + uint16_t pre_in_shape_h = INVALID_SHAPE; + uint64_t pre_in_addr = INVALID_ADDR; + uint16_t pre_in_format = INVALID_FORMAT; + uint16_t pre_out_format = INVALID_FORMAT; + uint8_t resize_alg = INVALID_RESIZE_ALG; + uint16_t resize_w = INVALID_SHAPE; + uint16_t resize_h = INVALID_SHAPE; + float cof_add[3] = { -FLT_MAX, -FLT_MAX, -FLT_MAX }; + float cof_mul[3] = { -FLT_MAX, -FLT_MAX, -FLT_MAX }; + uint16_t crop_tl_x = INVALID_SHAPE; + uint16_t crop_tl_y = INVALID_SHAPE; + uint16_t crop_w = INVALID_SHAPE; + uint16_t crop_h = INVALID_SHAPE; +} s_preproc_param_t; + +typedef struct +{ + std::string name; + uint32_t value; + uint16_t offset; + uint16_t size; +} s_op_param_t; + +typedef struct +{ + std::string name; + std::string lib; + uint16_t offset; + std::vector param_list; +} s_op_t; + +static void clear_param(s_op_param_t* data) +{ + data->name = ""; + data->value = 0; + data->offset = 0; + data->size = 0; +} + +static void clear_op(s_op_t* data) +{ + data->name = ""; + data->lib = ""; + data->offset = 0; + data->param_list.clear(); +} + +static std::string setW(std::string const &str, int n) +{ + std::ostringstream oss; + oss << std::left< param_data; + /*Buffer to store weight.dat data*/ + std::vector weight_data; + /*List to store parmeter information, i.e., address, offset, size.*/ + std::vector param_info; + uint8_t run_mode; + /*Variables for internal in/out information */ + uint16_t pre_out_shape_w = (uint16_t) INVALID_SHAPE; + uint16_t pre_out_shape_h = (uint16_t) INVALID_SHAPE; + uint16_t pre_in_shape_c = (uint16_t) FORMAT_UNKNOWN; + uint16_t pre_out_shape_c = (uint16_t) FORMAT_UNKNOWN; + uint8_t pre_in_type_size = 0; + uint8_t pre_out_type_size = 0; + /*Flags to figure out whether operators 
are included in Pre-Runtime Object files*/ + bool crop_included = false; + bool resize_included = false; + bool normalize_included = false; + + /*Since ADRCONV cannot delete just any entry, a means to reconfigure everything became necessary.*/ + uint64_t start_addr_v2h; + uint64_t mapped_in_addr_v2h; + + /*Supported Format*/ + const uint16_t supported_format_in[17] = + { + FORMAT_YUYV_422, + FORMAT_YVYU_422, + FORMAT_UYUV_422, + FORMAT_VUYY_422, + FORMAT_YUYV_420, + FORMAT_UYVY_420, + FORMAT_YV12_420, + FORMAT_IYUV_420, + FORMAT_NV12_420, + FORMAT_NV21_420, + FORMAT_IMC1_420, + FORMAT_IMC2_420, + FORMAT_IMC3_420, + FORMAT_IMC4_420, + FORMAT_GRAY, + FORMAT_RGB, + FORMAT_BGR + }; + const uint16_t supported_format_out[3] = + { + FORMAT_GRAY, + FORMAT_RGB, + FORMAT_BGR + }; + /*Functions*/ + uint8_t ReadAddrmapTxt(std::string addr_file); + uint8_t WritePrerunData(const std::string dir); + uint8_t LoadFileToMemDynamic(std::string data, unsigned long offset, unsigned long size, uint32_t file_type); + uint8_t LoadFileToMemDynamic(std::string data, unsigned long offset, unsigned long size); + uint8_t LoadDataToMem(std::vector *data, unsigned long from, unsigned long size); + uint8_t LoadDataToMem(std::vector data, unsigned long from, unsigned long size); + uint8_t ReadFileData(std::vector *data, std::string file, unsigned long size); + uint8_t ReadFileData(std::vector &data, std::string file, unsigned long size); + uint8_t GetResult(unsigned long output_addr, unsigned long output_size); + uint8_t ParseParamInfo(const std::string info_file); + uint8_t LoadParamInfo(); + uint8_t UpdateParamToDynamic(uint32_t start_addr); + + int8_t UpdateParamData(const s_preproc_param_t param); + int8_t UpdateWeightData(const s_preproc_param_t param); + + void UpdateInputShape(const uint16_t w, const uint16_t h); + void UpdateResizeShape(const uint16_t w, const uint16_t h); + void UpdateResizeAlg(const uint8_t val); + void UpdateFormat(const uint16_t input_val, const uint16_t output_val); + 
uint8_t UpdateCoefficient(const float* cof_add, const float* cof_mul); + void UpdateCropParam(const uint16_t tl_x, const uint16_t tl_y, const uint16_t w, const uint16_t h); + + bool IsDifferentFmInternal(const float* cof_add, const float* cof_mul); + void WriteValue(uint16_t offset, uint32_t value, uint8_t size); + bool IsInSupportedList(uint16_t format, uint8_t is_input); + bool IsSupportedFormat(const s_preproc_param_t param, uint16_t format_in, uint16_t format_out); + uint32_t GetStartAddress(uint32_t addr, drpai_data_t drpai_data); + uint64_t GetStartAddress(uint64_t addr, drpai_data_t drpai_data); + bool StartsWith(std::string str, std::string prefix); + double timedifference_msec(struct timespec t0, struct timespec t1); +}; + +#endif //PRERUNTIME_H diff --git a/R01_object_detection/src_v2h/PreRuntimeV2H.cpp b/R01_object_detection/src_v2h/PreRuntimeV2H.cpp new file mode 100755 index 0000000..e0e69e8 --- /dev/null +++ b/R01_object_detection/src_v2h/PreRuntimeV2H.cpp @@ -0,0 +1,836 @@ +/* + * Original Code (C) Copyright Renesas Electronics Corporation 2023 + * + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + +/*********************************************************************************************************************** +* File Name : PreRuntime.cpp +* Version : 1.1.0 +* Description : PreRuntime Source file +***********************************************************************************************************************/ + +#include +#include +#include +#include "PreRuntime.h" + +PreRuntime::PreRuntime() +{ +} + +PreRuntime::~PreRuntime() +{ + /*Free internal output buffer*/ + if(NULL != internal_buffer) + { + free(internal_buffer); + } + /*Close DRP-AI Driver*/ + if (0 <= drpai_obj_info.drpai_fd ) + { + errno = 0; + if (PRE_SUCCESS != close(drpai_obj_info.drpai_fd )) + { + std::cerr << "[ERROR] Failed to close DRP-AI Driver : errno=" << errno << std::endl; + } + } +} + +/***************************************** +* Function Name : timedifference_msec +* Description : Function to compute the processing time in mili-seconds +* Arguments : t0 = processing start time +* t1 = processing end time +* Return value : processing time in mili-seconds +******************************************/ +double PreRuntime::timedifference_msec(struct timespec t0, struct timespec t1) +{ + return (t1.tv_sec - t0.tv_sec) * 1000.0 + (t1.tv_nsec - t0.tv_nsec) / 1000.0 / 1000.0; +} + +/***************************************** +* Function Name : ReadAddrmapTxt +* Description : Loads address and size of DRP-AI Object files into struct addr. 
+* Arguments : addr_file = filename of addressmap file (from DRP-AI Object files) +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t PreRuntime::ReadAddrmapTxt(std::string addr_file) +{ + std::string str; + unsigned long l_addr = 0; + unsigned long l_size = 0; + std::string element, a, s; + errno = 0; + + std::ifstream ifs(addr_file); + if (ifs.fail()) + { + std::cerr << "[ERROR] Failed to open Address Map List "< config_data; + std::string fname = dir+"/drp_config.mem"; + ret = ReadFileData(&config_data, fname, drpai_obj_info.drpai_address.drp_config_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/drp_config.mem" << std::endl; + return PRE_ERROR; + } + /*Load weight data to memory using non-dynamic function.*/ + ret = LoadDataToMem(&config_data, drpai_obj_info.drpai_address.drp_config_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.drp_config_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/drp_config.mem" << std::endl; + return PRE_ERROR; + } + + + std::vector aimac_desc_data; + ret = ReadFileData(&aimac_desc_data, dir+"/aimac_desc.bin", drpai_obj_info.drpai_address.desc_aimac_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/aimac_desc.bin" << std::endl; + return PRE_ERROR; + } + /*Load weight data to memory using non-dynamic function.*/ + ret = LoadDataToMem(&aimac_desc_data, drpai_obj_info.drpai_address.desc_aimac_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.desc_aimac_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/aimac_desc.bin" << std::endl; + return PRE_ERROR; + } + + std::vector drp_desc_data; + ret = ReadFileData(&drp_desc_data, dir+"/drp_desc.bin", drpai_obj_info.drpai_address.desc_drp_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed 
to read file "<< dir+"/drp_desc.bin" << std::endl; + return PRE_ERROR; + } + ret = LoadDataToMem(&drp_desc_data, drpai_obj_info.drpai_address.desc_drp_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.desc_drp_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/drp_desc.bin" << std::endl; + return PRE_ERROR; + } + + + std::vector drp_param_data; + ret = ReadFileData(&drp_param_data, dir+"/drp_param.bin", drpai_obj_info.drpai_address.drp_param_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/drp_param.bin" << std::endl; + return PRE_ERROR; + } + ret = LoadDataToMem(&drp_param_data, drpai_obj_info.drpai_address.drp_param_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.drp_param_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/drp_param.bin" << std::endl; + return PRE_ERROR; + } + + std::vector weight_data; + ret = ReadFileData(&weight_data, dir+"/weight.bin", drpai_obj_info.drpai_address.weight_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/weight.bin" << std::endl; + return PRE_ERROR; + } + ret = LoadDataToMem(&weight_data, drpai_obj_info.drpai_address.weight_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.weight_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/weight.bin" << std::endl; + return PRE_ERROR; + } + + std::vector aimac_param_cmd_data; + ret = ReadFileData(&aimac_param_cmd_data, dir+"/aimac_param_cmd.bin", drpai_obj_info.drpai_address.aimac_param_cmd_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/aimac_param_cmd.bin" << std::endl; + return PRE_ERROR; + } + ret = LoadDataToMem(&aimac_param_cmd_data, drpai_obj_info.drpai_address.aimac_param_cmd_addr + drpai_obj_info.data_inout.start_address, 
drpai_obj_info.drpai_address.aimac_param_cmd_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/aimac_param_cmd.bin" << std::endl; + return PRE_ERROR; + } + + std::vector aimac_param_desc_data; + ret = ReadFileData(&aimac_param_desc_data, dir+"/aimac_param_desc.bin", drpai_obj_info.drpai_address.aimac_param_desc_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/aimac_param_desc.bin" << std::endl; + return PRE_ERROR; + } + ret = LoadDataToMem(&aimac_param_desc_data, drpai_obj_info.drpai_address.aimac_param_desc_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.aimac_param_desc_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/aimac_param_desc.bin" << std::endl; + return PRE_ERROR; + } + + std::vector aimac_cmd_data; + ret = ReadFileData(&aimac_cmd_data, dir+"/aimac_cmd.bin", drpai_obj_info.drpai_address.aimac_cmd_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< dir+"/aimac_cmd.bin" << std::endl; + return PRE_ERROR; + } + ret = LoadDataToMem(&aimac_cmd_data, drpai_obj_info.drpai_address.aimac_cmd_addr + drpai_obj_info.data_inout.start_address, drpai_obj_info.drpai_address.aimac_cmd_size); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write file data "<< dir+"/aimac_cmd.bin" << std::endl; + return PRE_ERROR; + } + + return PRE_SUCCESS; +} + +/***************************************** +* Function Name : LoadDataToMem +* Description : Loads a drp_param.bin to memory via DRP-AI Driver +* Arguments : data = filename to be written to memory +* from = memory start address where the data is written +* size = data size to be written +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t PreRuntime::LoadDataToMem(std::vector *data, unsigned long from, unsigned long size) +{ + int drpai_fd = 
drpai_obj_info.drpai_fd; + drpai_data_t drpai_data; + uint8_t ret = 0; + + errno = 0; + drpai_data.address = from; + drpai_data.size = size; + ret = ioctl(drpai_fd, DRPAI_ASSIGN, &drpai_data); + if ( -1 == ret ) + { + std::cerr << "[ERROR] Failed to run DRPAI_ASSIGN : errno=" << errno << std::endl; + return PRE_ERROR; + } + ret = write(drpai_fd, data->data(), size); + if ( -1 == ret ) + { + std::cerr << "[ERROR] Failed to write with DRPAI_ASSIGN : errno=" << errno << std::endl; + return PRE_ERROR; + } + + return PRE_SUCCESS; +} + +/***************************************** +* Function Name : ReadFileData +* Description : Loads a drp_param.bin +* Arguments : data = container to store the file contents +* file = filename to be read +* size = data size to be read +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t PreRuntime::ReadFileData(std::vector *data, std::string file, unsigned long size) +{ + errno = 0; + data->resize(size); + data->clear(); + + std::ifstream ifs(file); + if (!ifs) + { + std::cerr << "[ERROR] Failed to open " << file << ": errno=" << errno << std::endl; + return PRE_ERROR; + } + + /* Store file data to internal vector */ + std::istreambuf_iterator it(ifs); + std::istreambuf_iterator last; + for (; it != last; ++it) + { + data->push_back(*it); + } + /* Check the param_data size is appropriate */ + if (size != data->size()) + { + std::cerr << "[ERROR] Failed to read " << file << ": errno=" << errno << std::endl; + return PRE_ERROR; + } + ifs.close(); + + return PRE_SUCCESS; +} + +/***************************************** +* Function Name : ParseParamInfo +* Description : Loads a drp_param_info.txt. +* Arguments : info_file = filename to be loaded. 
+* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t PreRuntime::ParseParamInfo(const std::string info_file) +{ + const std::string offset_add = OP_HEAD; + const std::string layer_name = OP_LAYER_NAME; + const std::string drp_lib = OP_LIB; + const std::string param_head = PRAM_HEAD; + const std::string param_value = PARAM_VALUE; + const std::string param_offset = PARAM_OFFSET; + const std::string param_size = PARAM_SIZE; + std::string str = ""; + std::string str_return = ""; + std::string element = ""; + std::string str_value = ""; + s_op_param_t tmp_param; + int drpai_fd = drpai_obj_info.drpai_fd; + drpai_assign_param_t drpai_param; + uint32_t drp_param_info_size; + errno = 0; + + /*Get param info file size*/ + std::ifstream param_file_for_size(info_file, std::ifstream::ate); + drp_param_info_size = static_cast(param_file_for_size.tellg()); + param_file_for_size.close(); + drpai_param.info_size = drp_param_info_size; + drpai_param.obj.address = drpai_obj_info.drpai_address.drp_param_addr + drpai_obj_info.data_inout.start_address; + drpai_param.obj.size = drpai_obj_info.drpai_address.drp_param_size; + + if (0 != ioctl(drpai_fd, DRPAI_ASSIGN_PARAM, &drpai_param)) + { + std::cerr << "[ERROR] Failed to run DRPAI_ASSIGN_PARAM : errno="<< errno << __FILE__ << __LINE__ << std::endl; + return PRE_ERROR; + } + + /* Open param info file */ + std::vector param_info_data; + int ret; + ret = ReadFileData(¶m_info_data, info_file, drp_param_info_size); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read file "<< info_file << std::endl; + return PRE_ERROR; + } + uint8_t param_info_array[param_info_data.size()]; + std::copy(param_info_data.begin(),param_info_data.end(),param_info_array); + if ( 0 > write(drpai_fd, param_info_array, drp_param_info_size)) + { + std::cerr << "[ERROR] Failed to write PARAM_INFO to DRP-AI Driver : errno="< addr)|| drpai_mem_addr_end < addr ) + { + std::cerr << "[ERROR] Not in DRP-AI 
memory area."<< std::endl; + return INVALID_ADDR; + } + if (0 != (addr % 64)) + { + std::cerr << "[ERROR] Not 64-byte aligned."<< std::endl; + return INVALID_ADDR; + } + return addr; +} + +/***************************************** +* Function Name : Load +* Description : Loads PreRuntime Object data. +* Arguments : pre_dir = folder name to be loaded. +* : start_addr = start address that object files are dynamically allocated. +* default value is INVALID_ADDR. +* : mode = pre or post mode. +* default value is MODE_PRE. +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t PreRuntime::Load(const std::string pre_dir, uint32_t start_addr, uint8_t mode) +{ + return Load(pre_dir, (uint64_t)start_addr); +} + +uint8_t PreRuntime::Load(const std::string pre_dir, uint64_t start_addr) +{ + uint8_t ret = 0; + struct stat statBuf; + std::string tmp_dir = "/"; + std::string dir = pre_dir; + std::string tmp_address_file = dir+"/addr_map.txt"; + const std::string drpai_param_file = dir + "/drp_param_info.txt"; + + /* Delete unnecessary slush */ + if (dir.size() >= tmp_dir.size() && + dir.find(tmp_dir, dir.size() - tmp_dir.size()) != std::string::npos) + { + dir = dir.erase(dir.size()-1); + } + /* Check whether directory exists*/ + if (0 != stat(dir.c_str(),&statBuf)) + { + std::cerr << "[ERROR] Directory " << dir << " not found." 
<< std::endl; + return PRE_ERROR; + } + + /*Check if PreRuntime Object files are generated from PreRuntime Compile Module*/ + if (0 == stat(tmp_address_file.c_str(),&statBuf)) + { + obj_prefix = dir; + } + + /*Define necessary filename*/ + const std::string address_file = dir+"/addr_map.txt"; + + errno = 0; + /*Open DRP-AI Driver*/ + drpai_obj_info.drpai_fd = open("/dev/drpai0", O_RDWR); + if (PRE_SUCCESS > drpai_obj_info.drpai_fd ) + { + std::cerr << "[ERROR] Failed to open DRP-AI Driver : errno=" << errno << std::endl; + return PRE_ERROR; + } + + /* Get DRP-AI Memory Area Address via DRP-AI Driver */ + ret = ioctl(drpai_obj_info.drpai_fd , DRPAI_GET_DRPAI_AREA, &drpai_data0); + if (-1 == ret) + { + std::cerr << "[ERROR] Failed to get DRP-AI Memory Area : errno=" << errno << std::endl; + return PRE_ERROR; + } + + /* Read Address Map List file */ + ret = ReadAddrmapTxt(address_file); + if (PRE_SUCCESS < ret) + { + std::cerr << "[ERROR] Failed to read Address Map List " << address_file << std::endl; + return PRE_ERROR; + } + + if(start_addr == INVALID_ADDR) + { + this->Occupied_size = drpai_obj_info.drpai_address.desc_drp_addr + drpai_obj_info.drpai_address.desc_drp_size; + this->Occupied_size = (Occupied_size + 0xffffff) & 0xff000000; + start_addr = drpai_data0.address + drpai_data0.size - Occupied_size; + } + else + { + this->Occupied_size = drpai_data0.size - (start_addr - drpai_data0.address); + } + + /* Set the DRP virtual start address to 0, indicating the actual 40-bit physical address. */ + errno = 0; + drpai_adrconv_t drpai_adrconv; + this->start_addr_v2h = start_addr; + drpai_adrconv.conv_address = start_addr; + drpai_adrconv.org_address = drpai_obj_info.drpai_address.data_in_addr; //Currently, data_in_addr contained the actual starting address. 
+ drpai_adrconv.size = this->Occupied_size; + drpai_adrconv.mode = DRPAI_ADRCONV_MODE_REPLACE; + if ( PRE_SUCCESS != ioctl(drpai_obj_info.drpai_fd , DRPAI_SET_ADRCONV, &drpai_adrconv)) + { + std::cerr << "[ERROR] Failed to run DRPAI_SET_ADRCONV : errno=" << errno << std::endl; + return PRE_ERROR; + } + + /*Define the start address.*/ + /*drpai_obj_info.drpai_address.data_in_addr maybe 0x00000000. */ + drpai_obj_info.data_inout.start_address = drpai_adrconv.conv_address - drpai_obj_info.drpai_address.data_in_addr; + + /*Parse drp_param_info.txt*/ + ret = ParseParamInfo(drpai_param_file); + if ( PRE_SUCCESS < ret ) + { + std::cerr << "[ERROR] Failed to read param info file: "<< drpai_param_file << std::endl; + return PRE_ERROR; + } + + /*Write binary parameters for drpai.*/ + ret = WritePrerunData(dir); + if ( PRE_SUCCESS != ret ) + { + std::cerr << "[ERROR] Failed to write parameters. "<< std::endl; + return PRE_ERROR; + } + + return PRE_SUCCESS; +} + +/***************************************** +* Function Name : GetResult +* Description : Function to save the DRP-AI output. Uses DRP-AI Driver +* Arguments : output_ptr = pointer to the buffer which stores DRP-AI output +* output_addr = memory address that DRP-AI output is stored. +* output_size = memory size of DRP-AI output +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t PreRuntime::GetResult(unsigned long output_addr, unsigned long output_size) +{ + int8_t ret = 0; + drpai_data_t drpai_data; + drpai_data.address = output_addr; + drpai_data.size = output_size; + + /*Free internal buffer if its memory is already allocated */ + if(internal_buffer != NULL ) + { + free(internal_buffer); + } + + internal_buffer = (uint32_t*) malloc(drpai_data.size); + + if(internal_buffer == NULL) + { + std::cerr << "[ERROR] Failed to malloc PreRuntime internal buffer." < l2 ? l1 : l2; + float r1 = x1 + w1/2; + float r2 = x2 + w2/2; + float right = r1 < r2 ? 
r1 : r2; + return right - left; +} + +/***************************************** +* Function Name : box_intersection +* Description : Function to compute the area of intersection of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : area of intersection +******************************************/ +float box_intersection(Box a, Box b) +{ + float w = overlap(a.x, a.w, b.x, b.w); + float h = overlap(a.y, a.h, b.y, b.h); + if(w < 0 || h < 0) + { + return 0; + } + float area = w*h; + return area; +} + +/***************************************** +* Function Name : box_union +* Description : Function to compute the area of union of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : area of union +******************************************/ +float box_union(Box a, Box b) +{ + float i = box_intersection(a, b); + float u = a.w*a.h + b.w*b.h - i; + return u; +} + +/***************************************** +* Function Name : box_iou +* Description : Function to compute the Intersection over Union (IoU) of Box a and b +* Arguments : a = Box 1 +* b = Box 2 +* Return value : IoU +******************************************/ +float box_iou(Box a, Box b) +{ + return box_intersection(a, b)/box_union(a, b); +} + +/***************************************** +* Function Name : filter_boxes_nms +* Description : Apply Non-Maximum Suppression (NMS) to get rid of overlapped rectangles. 
+* Arguments : det= detected rectangles +* size = number of detections stored in det +* th_nms = threshold for nms +* Return value : - +******************************************/ +void filter_boxes_nms(std::vector &det, int32_t size, float th_nms) +{ + int32_t count = size; + int32_t i = 0; + int32_t j = 0; + Box a; + Box b; + float b_intersection = 0; + for (i = 0; i < count; i++) + { + a = det[i].bbox; + for (j = 0; j < count; j++) + { + if (i == j) + { + continue; + } + if (det[i].c != det[j].c) + { + continue; + } + b = det[j].bbox; + b_intersection = box_intersection(a, b); + if ((box_iou(a, b)>th_nms) || (b_intersection >= a.h * a.w - 1) || (b_intersection >= b.h * b.w - 1)) + { + if (det[i].prob > det[j].prob) + { + det[j].prob= 0; + } + else + { + det[i].prob= 0; + } + } + } + } + return; +} diff --git a/R01_object_detection/src_v2h/box.h b/R01_object_detection/src_v2h/box.h new file mode 100755 index 0000000..dd5705d --- /dev/null +++ b/R01_object_detection/src_v2h/box.h @@ -0,0 +1,60 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. 
TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : box.h +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef BOX_H +#define BOX_H + +#include +#include +#include +#include + +/***************************************** +* Box : Bounding box coordinates and its size +******************************************/ +typedef struct +{ + float x, y, w, h; +} Box; + +/***************************************** +* detection : Detected result +******************************************/ +typedef struct detection +{ + Box bbox; + int32_t c; + float prob; +} detection; + +/***************************************** +* Functions +******************************************/ +float box_iou(Box a, Box b); +float overlap(float x1, float w1, float x2, float w2); +float box_intersection(Box a, Box b); +float box_union(Box a, Box b); +void filter_boxes_nms(std::vector &det, int32_t size, float th_nms); + 
+#endif diff --git a/R01_object_detection/src_v2h/define.h b/R01_object_detection/src_v2h/define.h new file mode 100755 index 0000000..76712bf --- /dev/null +++ b/R01_object_detection/src_v2h/define.h @@ -0,0 +1,223 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. 
TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : define.h +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef DEFINE_MACRO_H +#define DEFINE_MACRO_H + +/*Uncomment to display the camera framerate on application window. 
*/ +// #define DISP_CAM_FRAME_RATE +/***************************************** +* includes +******************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/*Camera control and GUI control*/ +#include +#include +#ifdef DISP_CAM_FRAME_RATE +#include +#endif + +/***************************************** +* Static Variables for YOLOv3 +* Following variables need to be changed in order to custormize the AI model +* - model_dir = directory name of DRP-AI TVM[*1] Model Object files +******************************************/ +/* Model Binary */ +const static std::string model_dir = "yolov3_onnx"; +/* Pre-processing Runtime Object */ +const static std::string pre_dir = model_dir + "/preprocess"; +/* Anchor box information */ +const static double anchors[] = +{ + 10, 13, + 16, 30, + 33, 23, + 30, 61, + 62, 45, + 59, 119, + 116, 90, + 156, 198, + 373, 326 + +}; +/* Class labels to be classified */ +const static std::string label_list = "coco-labels-2014_2017.txt"; +/* Empty since labels will be loaded from label_list file */ +static std::vector label_file_map = {}; + +/***************************************** +* Macro for YOLOv3 +******************************************/ + +/* Number of class to be detected */ +#define NUM_CLASS (80) +/* Number for [region] layer num parameter */ +#define NUM_BB (3) +#define NUM_INF_OUT_LAYER (3) +/* Thresholds */ +#define TH_PROB (0.5f) +#define TH_NMS (0.5f) +/* Size of input image to the model */ +#define MODEL_IN_W (416) +#define MODEL_IN_H (416) + +/* Number of grids in the image. 
The length of this array MUST match with the NUM_INF_OUT_LAYER */ +const static uint8_t num_grids[] = { 13, 26, 52 }; +/* Number of DRP-AI output */ +const static uint32_t INF_OUT_SIZE = (NUM_CLASS + 5) * NUM_BB * num_grids[0] * num_grids[0] + + (NUM_CLASS + 5) * NUM_BB * num_grids[1] * num_grids[1] + + (NUM_CLASS + 5) * NUM_BB * num_grids[2] * num_grids[2]; + +/***************************************** +* Macro for Application +******************************************/ +/*Maximum DRP-AI Timeout threshold*/ +#define DRPAI_TIMEOUT (5) + +/*Camera Capture Image Information*/ +#define CAM_IMAGE_WIDTH (640) +#define CAM_IMAGE_HEIGHT (480) +#define CAM_IMAGE_CHANNEL_BGR (3) +/*Camera Capture Information */ +#define INPUT_CAM_NAME "USB Camera" +#define CAPTURE_STABLE_COUNT (8) + +/*DRP-AI Input image information*/ +/*** DRP-AI input is assigned to the buffer having the size of CAM_IMAGE_WIDTH^2 */ +#define DRPAI_IN_WIDTH (CAM_IMAGE_WIDTH) +#define DRPAI_IN_HEIGHT (CAM_IMAGE_WIDTH) + +/*Wayland Display Image Information*/ +#define IMAGE_OUTPUT_WIDTH (1920) +#define IMAGE_OUTPUT_HEIGHT (1080) +#define IMAGE_OUTPUT_CHANNEL_BGRA (4) + +/*DRP-AI Input image information*/ +#define DRPAI_OUT_WIDTH (IMAGE_OUTPUT_WIDTH) +#define DRPAI_OUT_HEIGHT (IMAGE_OUTPUT_HEIGHT) + +/*Image:: Information for drawing on image*/ +#define CHAR_SCALE_LARGE (1.0) +#define CHAR_SCALE_SMALL (0.9) +#define CHAR_SCALE_VERY_SMALL (0.6) +#define CHAR_THICKNESS (2) +#define CHAR_SCALE_BB (0.4) +#define CHAR_THICKNESS_BB (1) +#define LINE_HEIGHT (30) /*in pixel*/ +#define LINE_HEIGHT_OFFSET (20) /*in pixel*/ +#define TEXT_WIDTH_OFFSET (10) /*in pixel*/ +#define WHITE_DATA (0xFFFFFFu) /* in RGB */ +#define BLACK_DATA (0x000000u) /* in RGB */ +#define GREEN_DATA (0x00FF00u) /* in RGB */ +#define RGB_FILTER (0x0000FFu) /* in RGB */ +#define BOX_LINE_SIZE (1) /*in pixel*/ +#define BOX_DOUBLE_LINE_SIZE (1) /*in pixel*/ +#define ALIGHN_LEFT (1) +#define ALIGHN_RIGHT (2) +/*For termination method display*/ 
+#define TEXT_START_X (1440) + +/* DRP_MAX_FREQ and DRPAI_FREQ are the */ +/* frequency settings for DRP-AI. */ +/* Basically use the default values */ + +#define DRP_MAX_FREQ (2) +/* DRP_MAX_FREQ can be set from 2 to 127 */ +/* 2: 420MHz */ +/* 3: 315MHz */ +/* ... */ +/* 127: 9.84MHz */ +/* Calculation Formula: */ +/* 1260MHz /(DRP_MAX_FREQ + 1) */ + +#define DRPAI_FREQ (2) +/* DRPAI_FREQ can be set from 1 to 127 */ +/* 1,2: 1GHz */ +/* 3: 630MHz */ +/* 4: 420MHz */ +/* 5: 315MHz */ +/* ... */ +/* 127: 10MHz */ +/* Calculation Formula: */ +/* 1260MHz /(DRPAI_FREQ - 1) */ +/* (When DRPAI_FREQ = 3 or more.) */ + +/*Timer Related*/ +#define CAPTURE_TIMEOUT (20) /* seconds */ +#define AI_THREAD_TIMEOUT (20) /* seconds */ +#define IMAGE_THREAD_TIMEOUT (20) /* seconds */ +#define DISPLAY_THREAD_TIMEOUT (20) /* seconds */ +#define KEY_THREAD_TIMEOUT (5) /* seconds */ +#define TIME_COEF (1) +/*Waiting Time*/ +#define WAIT_TIME (1000) /* microseconds */ + +/*Array size*/ +#define SIZE_OF_ARRAY(array) (sizeof(array)/sizeof(array[0])) + +#endif diff --git a/R01_object_detection/src_v2h/dmabuf.cpp b/R01_object_detection/src_v2h/dmabuf.cpp new file mode 100755 index 0000000..845d0d3 --- /dev/null +++ b/R01_object_detection/src_v2h/dmabuf.cpp @@ -0,0 +1,90 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. 
TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : dmabuf.cpp +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +#include "dmabuf.h" + +/***************************************** +* Function Name : buffer_alloc_dmabuf +* Description : Allocate a DMA buffer in continuous memory area. 
+* Arguments : buffer = pointer to the dma_buffer struct +* buf_size = size of the allocation +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t buffer_alloc_dmabuf( dma_buffer *buffer, int buf_size) +{ + MMNGR_ID id; + uint32_t phard_addr; + void *puser_virt_addr; + int m_dma_fd; + + buffer->size = buf_size; + mmngr_alloc_in_user_ext(&id, buffer->size, &phard_addr, &puser_virt_addr, MMNGR_VA_SUPPORT_CACHED, NULL); + memset((void*)puser_virt_addr, 0, buffer->size); + buffer->idx = id; + buffer->mem = (void *)puser_virt_addr; + buffer->phy_addr = phard_addr; + if (!buffer->mem) + { + return -1; + } + + mmngr_export_start_in_user_ext(&id, buffer->size, phard_addr, &m_dma_fd, NULL); + buffer->dbuf_fd = m_dma_fd; + return 0; +} + +/***************************************** +* Function Name : buffer_free_dmabuf +* Description : free a DMA buffer in continuous memory area. +* MUST be called if buffer_alloc_dmabuf is called. +* Arguments : buffer = pointer to the dma_buffer struct +* Return value : - +******************************************/ +void buffer_free_dmabuf(dma_buffer *buffer) +{ + mmngr_free_in_user_ext(buffer->idx); + return; +} + +/***************************************** +* Function Name : buffer_flush_dmabuf +* Description : flush a DMA buffer in continuous memory area +* MUST be called when writing data to DMA buffer +* Arguments : idx = id of the buffer to be flushed. +* size = size to be flushed. 
+* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int buffer_flush_dmabuf(uint32_t idx, uint32_t size) +{ + int mm_ret = 0; + + /* Flush capture image area cache */ + mm_ret = mmngr_flush(idx, 0, size); + return mm_ret; +} \ No newline at end of file diff --git a/R01_object_detection/src_v2h/dmabuf.h b/R01_object_detection/src_v2h/dmabuf.h new file mode 100755 index 0000000..cddd683 --- /dev/null +++ b/R01_object_detection/src_v2h/dmabuf.h @@ -0,0 +1,64 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : dmabuf.h +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef DMABUF_H +#define DMABUF_H + +#include "define.h" + +/* This block of code is only accessible from C code. */ +#ifdef __cplusplus +extern "C" { +#endif +#include "mmngr_user_public.h" +#include "mmngr_buf_user_public.h" +#ifdef __cplusplus +} +#endif + +/***************************************** +* dma_buffer : dma buffer itself and its feature +******************************************/ +typedef struct +{ + /* The index of the buffer. */ + uint32_t idx; + /* The file descriptor for the DMA buffer. */ + uint32_t dbuf_fd; + /* The size of the buffer in bytes. */ + uint32_t size; + /* The physical address of DMA buffer. */ + uint32_t phy_addr; + /* The pointer to the memory for the buffer. */ + void *mem; +}dma_buffer; + +/***************************************** +* Functions +******************************************/ +int8_t buffer_alloc_dmabuf(dma_buffer *buffer,int buf_size); +int buffer_flush_dmabuf(uint32_t idx, uint32_t size); +void buffer_free_dmabuf(dma_buffer *buffer); + +#endif //DMABUF_H diff --git a/R01_object_detection/src_v2h/image.cpp b/R01_object_detection/src_v2h/image.cpp new file mode 100755 index 0000000..82b6667 --- /dev/null +++ b/R01_object_detection/src_v2h/image.cpp @@ -0,0 +1,297 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. 
No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : image.cpp +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +#include "image.h" + +Image::Image() +{ + +} + + +Image::~Image() +{ +} + +/***************************************** +* Function Name : get_H +* Description : Function to get the image height +* This function is NOT used currently. +* Arguments : - +* Return value : img_h = current image height +******************************************/ +uint32_t Image::get_H() +{ + return img_h; +} + + +/***************************************** +* Function Name : get_W +* Description : Function to get the image width +* This function is NOT used currently. +* Arguments : - +* Return value : img_w = current image width +******************************************/ +uint32_t Image::get_W() +{ + return img_w; +} + + +/***************************************** +* Function Name : get_C +* Description : Function to set the number of image channel +* This function is NOT used currently. 
+* Arguments : c = new number of image channel to be set +* Return value : - +******************************************/ +uint32_t Image::get_C() +{ + return img_c; +} + + +/***************************************** +* Function Name : get_mat +* Description : Function to return the cv::Mat +* Arguments : - +* Return value : cv::Mat +******************************************/ +cv::Mat Image::get_mat() +{ + return img_mat; +} + + +/***************************************** +* Function Name : set_mat +* Description : Function to register cv::Mat to Image class +* Arguments : input_mat = input cv::Mat to be registered. +* Return value : - +******************************************/ +void Image::set_mat(const cv::Mat& input_mat) +{ + img_mat = input_mat.clone(); +} + + +/***************************************** +* Function Name : init +* Description : Function to initialize Image class +* Arguments : w = input image width in YUYV +* h = input image height in YUYV +* c = input image channel in YUYV +* ow = output image width in BGRA to be displayed via Wayland +* oh = output image height in BGRA to be displayed via Wayland +* oc = output image channel in BGRA to be displayed via Wayland +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +uint8_t Image::init(uint32_t w, uint32_t h, uint32_t c, + uint32_t ow, uint32_t oh, uint32_t oc) +{ + /*Initialize input image information */ + img_w = w; + img_h = h; + img_c = c; + /*Initialize output image information*/ + out_w = ow; + out_h = oh; + out_c = oc; + return 0; +} + +/***************************************** +* Function Name : write_string_rgb +* Description : OpenCV putText() in RGB +* Arguments : str = string to be drawn +* align_type = left-align if 1 and right-align if 2 +* x = bottom left coordinate X of string to be drawn +* y = bottom left coordinate Y of string to be drawn +* scale = scale for letter size +* color = color of bounding box. must be in RGB, e.g. 
white = 0xFFFFFF +* Return Value : - +******************************************/ +void Image::write_string_rgb(std::string str, uint8_t align_type, uint32_t x, uint32_t y, float scale, uint32_t color) +{ + uint8_t thickness = CHAR_THICKNESS; + /*Extract RGB information*/ + uint8_t r = (color >> 16) & RGB_FILTER; + uint8_t g = (color >> 8) & RGB_FILTER; + uint8_t b = color & RGB_FILTER; + int ptx = 0; + int pty = 0; + /*OpenCV image data is in BGRA */ + cv::Mat bgra_image = img_mat.clone(); + + int baseline = 0; + cv::Size size = cv::getTextSize(str.c_str(), cv::FONT_HERSHEY_SIMPLEX, scale, thickness + 2, &baseline); + if (align_type == 1) + { + ptx = x; + pty = y; + } + else if (align_type == 2) + { + ptx = out_w - (size.width + x); + pty = y; + } + /*Color must be in BGR order*/ + cv::putText(bgra_image, str.c_str(), cv::Point(ptx, pty), cv::FONT_HERSHEY_SIMPLEX, + scale, cv::Scalar(0x00, 0x00, 0x00, 0xFF), thickness + 2); + cv::putText(bgra_image, str.c_str(), cv::Point(ptx, pty), cv::FONT_HERSHEY_SIMPLEX, + scale, cv::Scalar(b, g, r, 0xFF), thickness); + + img_mat = bgra_image.clone(); + +} + +/***************************************** +* Function Name : write_string_rgb_boundingbox +* Description : Draw bounding box with label in RGB +* Arguments : str = string to be drawn +* align_type = left-align if 1 and right-align if 2 +* x_min = top left coordinate X of box to be drawn +* y_min = top left coordinate Y of box to be drawn +* x_max = bottom right coordinate X of box to be drawn +* y_max = bottom right coordinate Y of box to be drawn +* scale = scale for letter size +* color = color of bounding box. must be in RGB, e.g. white = 0xFFFFFF +* str_color = color of letter. must be in RGB, e.g. 
white = 0xFFFFFF +* Return Value : - +******************************************/ +void Image::write_string_rgb_boundingbox(std::string str, uint8_t align_type, + uint32_t x_min, uint32_t y_min, uint32_t x_max, uint32_t y_max, + float scale, uint32_t color, uint32_t str_color) +{ + uint8_t thickness = CHAR_THICKNESS_BB; + /*Extract RGB information*/ + uint8_t r = (color >> 16) & RGB_FILTER; + uint8_t g = (color >> 8) & RGB_FILTER; + uint8_t b = color & RGB_FILTER; + + uint8_t r_str = (str_color >> 16) & RGB_FILTER; + uint8_t g_str = (str_color >> 8) & RGB_FILTER; + uint8_t b_str = str_color & RGB_FILTER; + + int ptx = 0; + int pty = 0; + /*OpenCV image data is in BGRA */ + cv::Mat bgra_image = img_mat.clone(); + int baseline = 0; + + /*Color must be in BGR order*/ + /*Draw Bounding Box with white double line */ + cv::rectangle(bgra_image, cv::Point(x_min+BOX_LINE_SIZE,y_min+BOX_LINE_SIZE), + cv::Point(x_max-BOX_LINE_SIZE,y_max-BOX_LINE_SIZE), + cv::Scalar(0xFF, 0xFF, 0xFF, 0xFF), BOX_DOUBLE_LINE_SIZE); + cv::rectangle(bgra_image, cv::Point(x_min,y_min), + cv::Point(x_max,y_max), + cv::Scalar(b, g, r, 0xFF), BOX_LINE_SIZE); + + cv::Size size = cv::getTextSize(str.c_str(), cv::FONT_HERSHEY_SIMPLEX, scale, thickness + 2, &baseline); + if (align_type == align_l) + { + ptx = x_min; + pty = y_min; + } + else if (align_type == align_r) + { + ptx = img_w - (size.width + x_min); + pty = y_min; + } + /*Draw label rectangle*/ + cv::rectangle(bgra_image, cv::Point(ptx-BOX_LINE_SIZE+1,pty+size.height+2), + cv::Point(ptx+size.width,pty), cv::Scalar(b, g, r, 0xFF), cv::FILLED); + /*Draw text as bounding box label in BLACK*/ + cv::putText(bgra_image, str.c_str(), cv::Point(ptx, pty+size.height), + cv::FONT_HERSHEY_SIMPLEX, scale, cv::Scalar(b_str, g_str, r_str, 0xFF), thickness); + + img_mat = bgra_image.clone(); +} +/***************************************** +* Function Name : draw_rect +* Description : Draw a rectangle +* Arguments : x = X coordinate of the center of rectangle 
+* y = Y coordinate of the center of rectangle +* w = width of the rectangle +* h = height of the rectangle +* str = string to label the rectangle +* color = color code for bounding box +* label_color = color code for label text +* Return Value : - +******************************************/ +void Image::draw_rect(int32_t x, int32_t y, int32_t w, int32_t h, const char * str,uint32_t color,uint32_t label_color) +{ + int32_t x_min = x - round(w / 2.); + int32_t y_min = y - round(h / 2.); + int32_t x_max = x + round(w / 2.) - 1; + int32_t y_max = y + round(h / 2.) - 1; + /* Check the bounding box is in the image range */ + x_min = x_min < 1 ? 1 : x_min; + x_max = ((img_w - 2) < x_max) ? (img_w - 2) : x_max; + y_min = y_min < 1 ? 1 : y_min; + y_max = ((img_h - 2) < y_max) ? (img_h - 2) : y_max; + + /* Draw the bounding box and class and probability*/ + write_string_rgb_boundingbox(str,1,x_min, y_min,x_max,y_max,CHAR_SCALE_BB,color, label_color); + + return; +} + + +/***************************************** +* Function Name : convert_size +* Description : Scale up the input data to the intermediate data using OpenCV. +* To convert to the final output size (1280x720), fill the right margin of the +* intermediate data (960x720) with black. 
+* Arguments : - +* Return value : - +******************************************/ +void Image::convert_size(int in_w, int resize_w, bool is_padding) +{ + if (in_w == resize_w) + { + return; + } + + cv::Mat org_image = img_mat; + cv::Mat resize_image; + + if (is_padding) + { + cv::resize(org_image, resize_image, cv::Size(), 1.0 * out_h / img_h, 1.0 * out_h / img_h); + copyMakeBorder(resize_image, resize_image, 0, 0, 0, out_w - resize_image.cols, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0, 255)); + img_mat = resize_image; + } + else + { + cv::resize(org_image, resize_image, cv::Size(), 1.0 * resize_w / in_w, 1.0 * resize_w / in_w); + img_mat = resize_image; + } +} diff --git a/R01_object_detection/src_v2h/image.h b/R01_object_detection/src_v2h/image.h new file mode 100755 index 0000000..0560dd7 --- /dev/null +++ b/R01_object_detection/src_v2h/image.h @@ -0,0 +1,68 @@ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
+* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : image.h +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef IMAGE_H +#define IMAGE_H + +#include "define.h" +#include + +class Image +{ + public: + Image(); + ~Image(); + + cv::Mat img_mat; + + void write_string_rgb(std::string str, uint8_t align_type, uint32_t x, uint32_t y, float size, uint32_t color); + void write_string_rgb_boundingbox(std::string str, uint8_t align_type, + uint32_t x_min, uint32_t y_min, uint32_t x_max, uint32_t y_max, + float scale, uint32_t color, uint32_t str_color); + void draw_rect(int32_t x, int32_t y, int32_t w, int32_t h, const char* str,uint32_t color, uint32_t label_color); + + uint32_t get_H(); + uint32_t get_W(); + uint32_t get_C(); + + uint8_t init(uint32_t w, uint32_t h, uint32_t c, uint32_t ow, uint32_t oh, uint32_t oc); + void convert_size(int in_w, int resize_w, bool is_padding); + void set_mat(const cv::Mat& input_mat); + cv::Mat get_mat(); + + private: + /* Input Image (BGR from camera) Information */ + uint32_t img_h; + uint32_t img_w; + uint32_t img_c; + /* Output Image (BGRA for Wayland) Information */ + uint32_t out_h; + uint32_t out_w; + uint32_t out_c; + + uint8_t align_l = ALIGHN_LEFT; + uint8_t align_r = ALIGHN_RIGHT; +}; + +#endif 
diff --git a/R01_object_detection/src_v2h/main.cpp b/R01_object_detection/src_v2h/main.cpp new file mode 100755 index 0000000..25c6b76 --- /dev/null +++ b/R01_object_detection/src_v2h/main.cpp @@ -0,0 +1,1533 @@ +/* + * Original Code (C) Copyright Edgecortix, Inc. 2022 + * Modified Code (C) Copyright Renesas Electronics Corporation 2023 + * + * *1 DRP-AI TVM is powered by EdgeCortix MERA(TM) Compiler Framework. + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ +/*********************************************************************************************************************** +* DISCLAIMER +* This software is supplied by Renesas Electronics Corporation and is only intended for use with Renesas products. No +* other uses are authorized. This software is owned by Renesas Electronics Corporation and is protected under all +* applicable laws, including copyright laws. +* THIS SOFTWARE IS PROVIDED "AS IS" AND RENESAS MAKES NO WARRANTIES REGARDING +* THIS SOFTWARE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ALL SUCH WARRANTIES ARE EXPRESSLY DISCLAIMED. 
TO THE MAXIMUM +* EXTENT PERMITTED NOT PROHIBITED BY LAW, NEITHER RENESAS ELECTRONICS CORPORATION NOR ANY OF ITS AFFILIATED COMPANIES +* SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES FOR ANY REASON RELATED TO THIS +* SOFTWARE, EVEN IF RENESAS OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +* Renesas reserves the right, without notice, to make changes to this software and to discontinue the availability of +* this software. By using this software, you agree to the additional terms and conditions found by accessing the +* following link: +* http://www.renesas.com/disclaimer +* +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. +***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : main.cpp +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** +* Includes +******************************************/ +/*DRP-AI TVM[*1] Runtime*/ +#include "MeraDrpRuntimeWrapper.h" +/*Pre-processing Runtime Header*/ +#include "PreRuntime.h" +/*Definition of Macros & other variables*/ +#include "define.h" +/*Image control*/ +#include "image.h" +/*Wayland control*/ +#include "wayland.h" +/*box drawing*/ +#include "box.h" +/*dmabuf for Pre-processing Runtime input data*/ +#include "dmabuf.h" +/*Mutual exclusion*/ +#include + +/***************************************** +* Global Variables +******************************************/ +/*Multithreading*/ +static sem_t terminate_req_sem; +static pthread_t ai_inf_thread; +static pthread_t kbhit_thread; +static pthread_t capture_thread; +static pthread_t 
img_thread; +static pthread_t hdmi_thread; +static std::mutex mtx; + +/*Flags*/ +static std::atomic inference_start (0); +static std::atomic img_obj_ready (0); +static std::atomic hdmi_obj_ready (0); +/*Global Variables*/ +static float drpai_output_buf[INF_OUT_SIZE]; + +static Image img; +/*Image to be displayed on GUI*/ +cv::Mat input_image; +cv::Mat capture_image; +cv::Mat proc_image; +cv::Mat display_image; + +/*GStreamer pipeline for camera capture*/ +static std::string gstreamer_pipeline = ""; + +/*AI Inference for DRP-AI*/ +/* DRP-AI TVM[*1] Runtime object */ +MeraDrpRuntimeWrapper runtime; +/* Pre-processing Runtime object */ +PreRuntime preruntime; +/*MMNGR buffer for DRP-AI Pre-processing*/ +static dma_buffer *drpai_buf; + +/*Processing Time*/ +static double pre_time = 0; +static double post_time = 0; +static double ai_time = 0; + +#ifdef DISP_CAM_FRAME_RATE +static double cap_fps = 0; +static double proc_time_capture = 0; +static uint32_t array_cap_time[30] = {1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000,1000}; +#endif /* DISP_CAM_FRAME_RATE */ + +/*DRP-AI Frequency setting*/ +static int32_t drp_max_freq; +static int32_t drpai_freq; + +static Wayland wayland; +static std::vector det; +static std::vector print_det; + +/***************************************** + * Function Name : query_device_status + * Description : function to check USB/MIPI device is connectod. + * Return value : media_port, media port that device is connectod. 
+ ******************************************/ +std::string query_device_status(std::string device_type) +{ + std::string media_port = ""; + /* Linux command to be executed */ + const char* command = "v4l2-ctl --list-devices"; + /* Open a pipe to the command and execute it */ + errno = 0; + FILE* pipe = popen(command, "r"); + if (!pipe) + { + fprintf(stderr, "[ERROR] Unable to open the pipe.\n", errno); + return media_port; + } + /* Read the command output line by line */ + char buffer[128]; + size_t found; + while (nullptr != fgets(buffer, sizeof(buffer), pipe)) + { + std::string response = std::string(buffer); + found = response.find(device_type); + if (std::string::npos != found) + { + fgets(buffer, sizeof(buffer), pipe); + media_port = std::string(buffer); + pclose(pipe); + /* return media port*/ + return media_port; + } + } + pclose(pipe); + /* return media port*/ + return media_port; +} + +/***************************************** +* Function Name : float16_to_float32 +* Description : Function by Edgecortex. Cast uint16_t a into float value. 
+* Arguments : a = uint16_t number +* Return value : float = float32 number +******************************************/ +float float16_to_float32(uint16_t a) +{ + return __extendXfYf2__(a); +} + +/***************************************** +* Function Name : timedifference_msec +* Description : compute the time differences in ms between two moments +* Arguments : t0 = start time +* t1 = stop time +* Return value : the time difference in ms +******************************************/ +static double timedifference_msec(struct timespec t0, struct timespec t1) +{ + return (t1.tv_sec - t0.tv_sec) * 1000.0 + (t1.tv_nsec - t0.tv_nsec) / 1000000.0; +} + +/***************************************** +* Function Name : wait_join +* Description : waits for a fixed amount of time for the thread to exit +* Arguments : p_join_thread = thread that the function waits for to Exit +* join_time = the timeout time for the thread for exiting +* Return value : 0 if successful +* not 0 otherwise +******************************************/ +static int8_t wait_join(pthread_t *p_join_thread, uint32_t join_time) +{ + int8_t ret_err; + struct timespec join_timeout; + ret_err = clock_gettime(CLOCK_REALTIME, &join_timeout); + if ( 0 == ret_err ) + { + join_timeout.tv_sec += join_time; + ret_err = pthread_timedjoin_np(*p_join_thread, NULL, &join_timeout); + } + return ret_err; +} + +/***************************************** +* Function Name : load_label_file +* Description : Load label list text file and return the label list that contains the label. +* Arguments : label_file_name = filename of label list. 
must be in txt format +* Return value : std::vector list = list contains labels +* empty if error occured +******************************************/ +std::vector load_label_file(std::string label_file_name) +{ + std::vector list = {}; + std::vector empty = {}; + std::ifstream infile(label_file_name); + + if (!infile.is_open()) + { + return list; + } + + std::string line = ""; + while (getline(infile,line)) + { + list.push_back(line); + if (infile.fail()) + { + return empty; + } + } + + return list; +} + +/***************************************** +* Function Name : get_result +* Description : Get DRP-AI Output from memory via DRP-AI Driver +* Arguments : - +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t get_result() +{ + int8_t ret = 0; + int32_t i = 0; + int32_t output_num = 0; + std::tuple output_buffer; + int64_t output_size; + uint32_t size_count = 0; + /* Get the number of output of the target model. */ + output_num = runtime.GetNumOutput(); + size_count = 0; + /*GetOutput loop*/ + for (i = 0;i(output_buffer). 
*/ + output_size = std::get<2>(output_buffer); + + /*Output Data Type = std::get<0>(output_buffer)*/ + if (InOutDataType::FLOAT16 == std::get<0>(output_buffer)) + { + /*Output Data = std::get<1>(output_buffer)*/ + uint16_t* data_ptr = reinterpret_cast(std::get<1>(output_buffer)); + for (int j = 0; j(output_buffer)) + { + /*Output Data = std::get<1>(output_buffer)*/ + float* data_ptr = reinterpret_cast(std::get<1>(output_buffer)); + for (int j = 0; j det_buff; + + /* Following variables are required for correct_yolo_boxes in Darknet implementation*/ + /* Note: This implementation refers to the "darknet detector test" */ + float new_w, new_h; + float correct_w = 1.; + float correct_h = 1.; + if ((float) (MODEL_IN_W / correct_w) < (float) (MODEL_IN_H/correct_h) ) + { + new_w = (float) MODEL_IN_W; + new_h = correct_h * MODEL_IN_W / correct_w; + } + else + { + new_w = correct_w * MODEL_IN_H / correct_h; + new_h = MODEL_IN_H; + } + int32_t n = 0; + int32_t b = 0; + int32_t y = 0; + int32_t x = 0; + int32_t offs = 0; + int32_t i = 0; + float tx = 0; + float ty = 0; + float tw = 0; + float th = 0; + float tc = 0; + float center_x = 0; + float center_y = 0; + float box_w = 0; + float box_h = 0; + float objectness = 0; + uint8_t num_grid = 0; + uint8_t anchor_offset = 0; + float classes[NUM_CLASS]; + float max_pred = 0; + int32_t pred_class = -1; + float probability = 0; + detection d; + /*Post Processing Start*/ + for (n = 0; n < NUM_INF_OUT_LAYER; n++) + { + num_grid = num_grids[n]; + anchor_offset = 2 * NUM_BB * (NUM_INF_OUT_LAYER - (n + 1)); + + for(b = 0; b < NUM_BB; b++) + { + for(y = 0; y < num_grid; y++) + { + for(x = 0; x < num_grid; x++) + { + offs = yolo_offset(n, b, y, x); + tc = floatarr[yolo_index(n, offs, 4)]; + tx = floatarr[offs]; + ty = floatarr[yolo_index(n, offs, 1)]; + tw = floatarr[yolo_index(n, offs, 2)]; + th = floatarr[yolo_index(n, offs, 3)]; + /* Compute the bounding box */ + /*get_yolo_box*/ + center_x = ((float) x + sigmoid(tx)) / (float) 
num_grid; + center_y = ((float) y + sigmoid(ty)) / (float) num_grid; + box_w = (float) exp(tw) * anchors[anchor_offset+2*b+0] / (float) MODEL_IN_W; + box_h = (float) exp(th) * anchors[anchor_offset+2*b+1] / (float) MODEL_IN_W; + /* Adjustment for VGA size */ + /* correct_yolo_boxes */ + center_x = (center_x - (MODEL_IN_W - new_w) / 2. / MODEL_IN_W) / ((float) new_w / MODEL_IN_W); + center_y = (center_y - (MODEL_IN_H - new_h) / 2. / MODEL_IN_H) / ((float) new_h / MODEL_IN_H); + box_w *= (float) (MODEL_IN_W / new_w); + box_h *= (float) (MODEL_IN_H / new_h); + center_x = round(center_x * DRPAI_IN_WIDTH); + center_y = round(center_y * DRPAI_IN_HEIGHT); + box_w = round(box_w * DRPAI_IN_WIDTH); + box_h = round(box_h * DRPAI_IN_HEIGHT); + objectness = sigmoid(tc); + Box bb = {center_x, center_y, box_w, box_h}; + /* Get the class prediction */ + for (i = 0; i < NUM_CLASS; i++) + { + classes[i] = sigmoid(floatarr[yolo_index(n, offs, 5+i)]); + } + max_pred = 0; + pred_class = -1; + for (i = 0; i < NUM_CLASS; i++) + { + if (classes[i] > max_pred) + { + pred_class = i; + max_pred = classes[i]; + } + } + /* Store the result into the list if the probability is more than the threshold */ + probability = max_pred * objectness; + if (probability > TH_PROB) + { + d = {bb, pred_class, probability}; + det_buff.push_back(d); + } + } + } + } + } + /* Non-Maximum Supression filter */ + filter_boxes_nms(det_buff, det_buff.size(), TH_NMS); + + mtx.lock(); + det.clear(); + copy(det_buff.begin(), det_buff.end(), back_inserter(det)); + mtx.unlock(); + return; +} + +/***************************************** +* Function Name : draw_bounding_box +* Description : Draw bounding box on image. +* Must be called before resizing the display image. 
+* Arguments : - +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +void draw_bounding_box(void) +{ + std::vector det_buff; + std::stringstream stream; + std::string result_str; + int32_t i = 0; + uint32_t color = GREEN_DATA; + uint32_t label_color = BLACK_DATA; + + mtx.lock(); + copy(det.begin(), det.end(), back_inserter(det_buff)); + mtx.unlock(); + + print_det.clear(); + /* Draw bounding box on RGB image. */ + for (i = 0; i < det_buff.size(); i++) + { + /* Skip the overlapped bounding boxes */ + if (det_buff[i].prob == 0) continue; + print_det.push_back(det_buff[i]); + /* Clear string stream for bounding box labels */ + stream.str(""); + /* Draw the bounding box on the image */ + stream << std::fixed << std::setprecision(2) << det_buff[i].prob; + result_str = label_file_map[det_buff[i].c]+ " "+ stream.str(); + img.draw_rect((int)det_buff[i].bbox.x, (int)det_buff[i].bbox.y, + (int)det_buff[i].bbox.w, (int)det_buff[i].bbox.h, + result_str.c_str(), color, label_color); + } + return; +} + +/***************************************** +* Function Name : print_result +* Description : print the result on display. 
+* Arguments : - +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t print_result(Image* img) +{ + std::stringstream stream; + std::string str = ""; + uint32_t total_time = ai_time + pre_time + post_time; + uint8_t str_count = 1; + uint8_t time_width = 5; + uint8_t time_precision = 1; + uint8_t result_width = 5; + uint8_t result_precision = 1; + uint32_t i = 0; + + /* Draw Total Time Result on BGR image.*/ + stream.str(""); + stream << "Total AI Time [ms]: " << std::setw(time_width)<< std::setfill(' ') << std::fixed << std::setprecision(time_precision) << std::round(total_time * 10) / 10; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, LINE_HEIGHT_OFFSET + (LINE_HEIGHT * str_count++), CHAR_SCALE_LARGE, WHITE_DATA); + + /* Draw Inference Time on BGR image.*/ + stream.str(""); + stream << " Inference : " << std::setw(time_width) << std::fixed << std::setprecision(time_precision) << std::round(ai_time * 10) / 10; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, LINE_HEIGHT_OFFSET + (LINE_HEIGHT * str_count++), CHAR_SCALE_SMALL, WHITE_DATA); + + /* Draw PreProcess Time on BGR image.*/ + stream.str(""); + stream << " PreProcess : " << std::setw(time_width) << std::fixed << std::setprecision(time_precision) << std::round(pre_time * 10) / 10; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, LINE_HEIGHT_OFFSET + (LINE_HEIGHT * str_count++), CHAR_SCALE_SMALL, WHITE_DATA); + + /* Draw PostProcess Time on BGR image.*/ + stream.str(""); + stream << " PostProcess : " << std::setw(time_width) << std::fixed << std::setprecision(time_precision) << std::round(post_time * 10) / 10; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, LINE_HEIGHT_OFFSET + (LINE_HEIGHT * str_count++), CHAR_SCALE_SMALL, WHITE_DATA); + + /* Insert empty lines.*/ 
+ str_count++; +#ifdef DISP_CAM_FRAME_RATE + /* Draw Camera Frame Rate on BGR image.*/ + uint8_t framerate_width = 3; + stream.str(""); + stream << "Camera Frame Rate [fps]: " << std::setw(framerate_width) << (uint32_t)cap_fps; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, LINE_HEIGHT_OFFSET + (LINE_HEIGHT * str_count++), CHAR_SCALE_SMALL, WHITE_DATA); +#endif /* DISP_CAM_FRAME_RATE */ + /* Insert empty lines.*/ + str_count++; + + /* Draw the detected results*/ + for (i = 0; i < print_det.size(); i++) + { + stream.str(""); + stream << label_file_map[print_det[i].c].c_str() << " " << std::setw(result_width) << std::fixed << std::setprecision(result_precision) << round(print_det[i].prob*100) << "%"; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, LINE_HEIGHT_OFFSET + (LINE_HEIGHT * str_count++), CHAR_SCALE_SMALL, WHITE_DATA); + } + + /* Draw the termination method at the bottom.*/ + stream.str(""); + stream << "To terminate the application,"; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, + IMAGE_OUTPUT_HEIGHT - LINE_HEIGHT*3, CHAR_SCALE_VERY_SMALL, WHITE_DATA); + stream.str(""); + stream << "press [Super]+[Tab] and press ENTER key."; + str = stream.str(); + img->write_string_rgb(str, ALIGHN_LEFT, TEXT_START_X + TEXT_WIDTH_OFFSET, + IMAGE_OUTPUT_HEIGHT - LINE_HEIGHT*2, CHAR_SCALE_VERY_SMALL, WHITE_DATA); + return 0; +} + +/***************************************** +* Function Name : R_Inf_Thread +* Description : Executes the DRP-AI inference thread +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Inf_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t inf_sem_check = 0; + + /*Variable for getting Inference output data*/ + void* output_ptr; + uint32_t out_size; + + /*Variable for Pre-processing parameter configuration*/ + 
s_preproc_param_t in_param; + + /*Inference Variables*/ + fd_set rfds; + struct timespec tv; + int8_t inf_status = 0; + + /*Variable for checking return value*/ + int8_t ret = 0; + /*Variable for Performance Measurement*/ + static struct timespec start_time; + static struct timespec inf_end_time; + static struct timespec pre_start_time; + static struct timespec pre_end_time; + static struct timespec post_start_time; + static struct timespec post_end_time; + + printf("Inference Thread Starting\n"); + in_param.pre_in_shape_w = DRPAI_IN_WIDTH; + in_param.pre_in_shape_h = DRPAI_IN_HEIGHT; + + printf("Inference Loop Starting\n"); + /*Inference Loop Start*/ + while(1) + { + while(1) + { + /*Gets the Termination request semaphore value. If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &inf_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != inf_sem_check) + { + goto ai_inf_end; + } + /*Checks if image frame from Capture Thread is ready.*/ + if (inference_start.load()) + { + break; + } + usleep(WAIT_TIME); + } + + /*Gets Pre-process Start time*/ + ret = timespec_get(&pre_start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Pre-process Start Time\n"); + goto err; + } + + in_param.pre_in_addr = (uintptr_t) drpai_buf->phy_addr; + + ret = preruntime.Pre(&in_param, &output_ptr, &out_size); + if (0 < ret) + { + fprintf(stderr, "[ERROR] Failed to run Pre-processing Runtime Pre()\n"); + goto err; + } + /*Gets AI Pre-process End Time*/ + ret = timespec_get(&pre_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Pre-process End Time\n"); + goto err; + } + + /*Set Pre-processing output to be inference input. 
*/ + runtime.SetInput(0, (float*)output_ptr); + + /*Pre-process Time Result*/ + pre_time = (timedifference_msec(pre_start_time, pre_end_time) * TIME_COEF); + + /*Gets inference starting time*/ + ret = timespec_get(&start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Inference Start Time\n"); + goto err; + } + + runtime.Run(); + + /*Gets AI Inference End Time*/ + ret = timespec_get(&inf_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Inference End Time\n"); + goto err; + } + /*Inference Time Result*/ + ai_time = (timedifference_msec(start_time, inf_end_time) * TIME_COEF); + + /*Gets Post-process starting time*/ + ret = timespec_get(&post_start_time, TIME_UTC); + if (0 == ret) + { + fprintf(stderr, "[ERROR] Failed to get Post-process Start Time\n"); + goto err; + } + + inference_start.store(0); + + /*Process to read the DRPAI output data.*/ + ret = get_result(); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get result from memory.\n"); + goto err; + } + + /*CPU Post-Processing For YOLOv3*/ + R_Post_Proc(drpai_output_buf); + + /*Gets Post-process End Time*/ + ret = timespec_get(&post_end_time, TIME_UTC); + if ( 0 == ret) + { + fprintf(stderr, "[ERROR] Failed to Get Post-process End Time\n"); + goto err; + } + /*Post-process Time Result*/ + + post_time = (timedifference_msec(post_start_time, post_end_time)*TIME_COEF); + } + /*End of Inference Loop*/ + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto ai_inf_end; +/*AI Thread Termination*/ +ai_inf_end: + /*To terminate the loop in Capture Thread.*/ + printf("AI Inference Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Capture_Thread +* Description : Executes the V4L2 capture with Capture thread. 
+* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Capture_Thread(void *threadid) +{ + std::string &gstream = gstreamer_pipeline; + printf("[INFO] GStreamer pipeline: %s\n", gstream.c_str()); + + /*Semaphore Variable*/ + int32_t capture_sem_check = 0; + int8_t ret = 0; + /* Counter to wait for the camera to stabilize */ + uint8_t capture_stabe_cnt = CAPTURE_STABLE_COUNT; + +#ifdef DISP_CAM_FRAME_RATE + int32_t cap_cnt = -1; + static struct timespec capture_time; + static struct timespec capture_time_prev = { .tv_sec = 0, .tv_nsec = 0, }; +#endif /* DISP_CAM_FRAME_RATE */ + + cv::VideoCapture g_cap; + cv::Mat g_frame; + cv::Mat padding_frame(CAM_IMAGE_WIDTH - CAM_IMAGE_HEIGHT, CAM_IMAGE_WIDTH, CV_8UC3); + + printf("Capture Thread Starting\n"); + + g_cap.open(gstream, cv::CAP_GSTREAMER); + if (!g_cap.isOpened()) + { + fprintf(stderr, "[ERROR] Failed to open camera.\n"); + goto err; + } + + while(1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &capture_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != capture_sem_check) + { + goto capture_end; + } + + /* Capture camera image and stop updating the capture buffer */ + + g_cap.read(g_frame); +#ifdef DISP_CAM_FRAME_RATE + cap_cnt++; + ret = timespec_get(&capture_time, TIME_UTC); + proc_time_capture = (timedifference_msec(capture_time_prev, capture_time) * TIME_COEF); + capture_time_prev = capture_time; + + int idx = cap_cnt % SIZE_OF_ARRAY(array_cap_time); + array_cap_time[idx] = (uint32_t)proc_time_capture; + int arraySum = std::accumulate(array_cap_time, array_cap_time + SIZE_OF_ARRAY(array_cap_time), 0); + double arrayAvg = 1.0 * arraySum / SIZE_OF_ARRAY(array_cap_time); + cap_fps = 1.0 / arrayAvg * 1000.0 + 0.5; +#endif /* DISP_CAM_FRAME_RATE */ + + /* Breaking the loop if no video frame is detected */ + if (g_frame.empty()) + { + fprintf(stderr, "[ERROR] Failed to get capture image.\n"); + goto err; + } + else + { + /* Do not process until the camera stabilizes, because the image is unreliable until the camera stabilizes. */ + if( capture_stabe_cnt > 0 ) + { + capture_stabe_cnt--; + } + else + { + if (!inference_start.load()) + { + /* Copy captured image to Image object. This will be used in Main Thread. 
*/ + mtx.lock(); + /*Image: CAM_IMAGE_WIDTH*CAM_IMAGE_HEIGHT (BGR) */ + input_image = g_frame.clone(); + + /*Add padding for keeping the aspect ratio: CAM_IMAGE_WIDTH*CAM_IMAGE_WIDTH (BGR) */ + cv::vconcat(input_image, padding_frame, input_image); + + /*Copy input data to drpai_buf for DRP-AI Pre-processing Runtime.*/ + memcpy( drpai_buf->mem, input_image.data, drpai_buf->size); + /* Flush buffer */ + ret = buffer_flush_dmabuf(drpai_buf->idx, drpai_buf->size); + if (0 != ret) + { + goto err; + } + mtx.unlock(); + inference_start.store(1); /* Flag for AI Inference Thread. */ + } + + if (!img_obj_ready.load()) + { + mtx.lock(); + capture_image = g_frame.clone(); + img.set_mat(capture_image); + mtx.unlock(); + img_obj_ready.store(1); /* Flag for Img Thread. */ + } + } + } + } /*End of Loop*/ + +/*Error Processing*/ +err: + sem_trywait(&terminate_req_sem); + goto capture_end; + +capture_end: + g_cap.release(); + /*To terminate the loop in AI Inference Thread.*/ + inference_start.store(1); + + printf("Capture Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Img_Thread +* Description : Executes img proc with img thread +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Img_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t hdmi_sem_check = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + + timespec start_time; + timespec end_time; + + /*Check the aspect ratio of camera input and display.*/ + bool display_padding = false; + float camera_ratio = (float) CAM_IMAGE_WIDTH / CAM_IMAGE_HEIGHT; + float display_ratio = (float) IMAGE_OUTPUT_WIDTH / IMAGE_OUTPUT_HEIGHT; + if (camera_ratio != display_ratio) + { + /*If different, set padding on Wayland display*/ + display_padding = true; + } + + printf("Image Thread Starting\n"); + while(1) + { + /*Gets The Termination Request Semaphore Value, If Different Then 1 Termination 
Is Requested*/ + /*Checks If sem_getvalue Is Executed Without Issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &hdmi_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != hdmi_sem_check) + { + goto hdmi_end; + } + /* Check img_obj_ready flag which is set in Capture Thread. */ + if (img_obj_ready.load()) + { + /* Draw bounding box on image. */ + draw_bounding_box(); + + /* Convert output image size. */ + img.convert_size(CAM_IMAGE_WIDTH, DRPAI_OUT_WIDTH, display_padding); + + /*Displays AI Inference Results on display.*/ + print_result(&img); + + proc_image = img.get_mat().clone(); + + if (!hdmi_obj_ready.load()) + { + hdmi_obj_ready.store(1); /* Flag for Display Thread. */ + } + img_obj_ready.store(0); + } + usleep(WAIT_TIME); //wait 1 tick time + } /*End Of Loop*/ + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore To 0*/ + sem_trywait(&terminate_req_sem); + goto hdmi_end; + +hdmi_end: + /*To terminate the loop in Capture Thread.*/ + img_obj_ready.store(0); + printf("Img Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Display_Thread +* Description : Executes the HDMI Display with Display thread +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Display_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t hdmi_sem_check = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + /* Initialize waylad */ + ret = wayland.init(IMAGE_OUTPUT_WIDTH, IMAGE_OUTPUT_HEIGHT, IMAGE_OUTPUT_CHANNEL_BGRA); + if(0 != ret) + { + fprintf(stderr, "[ERROR] Failed to initialize Image for Wayland\n"); + goto err; + } + printf("Display Thread Starting\n"); + while(1) + { + /*Gets The Termination Request Semaphore Value, If Different Then 1 Termination Is Requested*/ + /*Checks If sem_getvalue Is 
Executed Without Issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &hdmi_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != hdmi_sem_check) + { + goto hdmi_end; + } + /* Check hdmi_obj_ready flag which is set in Capture Thread. */ + if (hdmi_obj_ready.load()) + { + /*Update Wayland*/ + display_image = proc_image; + cv::cvtColor(display_image, display_image, cv::COLOR_BGR2BGRA); + wayland.commit(display_image.data, NULL); + hdmi_obj_ready.store(0); + } + usleep(WAIT_TIME); //wait 1 tick timedg + } /*End Of Loop*/ + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore To 0*/ + sem_trywait(&terminate_req_sem); + goto hdmi_end; + +hdmi_end: + /*To terminate the loop in Capture Thread.*/ + hdmi_obj_ready.store(0); + printf("Display Thread Terminated\n"); + pthread_exit(NULL); +} + + +/***************************************** +* Function Name : R_Kbhit_Thread +* Description : Executes the Keyboard hit thread (checks if enter key is hit) +* Arguments : threadid = thread identification +* Return value : - +******************************************/ +void *R_Kbhit_Thread(void *threadid) +{ + /*Semaphore Variable*/ + int32_t kh_sem_check = 0; + /*Variable to store the getchar() value*/ + int32_t c = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + + printf("Key Hit Thread Starting\n"); + + printf("************************************************\n"); + printf("* Press ENTER key to quit. *\n"); + printf("************************************************\n"); + + /*Set Standard Input to Non Blocking*/ + errno = 0; + ret = fcntl(0, F_SETFL, O_NONBLOCK); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to run fctnl(): errno=%d\n", errno); + goto err; + } + + while(1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + /*Checks if sem_getvalue is executed wihtout issue*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &kh_sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != kh_sem_check) + { + goto key_hit_end; + } + + c = getchar(); + if (EOF != c) + { + /* When key is pressed. */ + printf("[INFO] Key Detected.\n"); + goto err; + } + else + { + /* When nothing is pressed. */ + usleep(WAIT_TIME); + } + } + +/*Error Processing*/ +err: + /*Set Termination Request Semaphore to 0*/ + sem_trywait(&terminate_req_sem); + goto key_hit_end; + +key_hit_end: + printf("Key Hit Thread Terminated\n"); + pthread_exit(NULL); +} + +/***************************************** +* Function Name : R_Main_Process +* Description : Runs the main process loop +* Arguments : - +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int8_t R_Main_Process() +{ + /*Main Process Variables*/ + int8_t main_ret = 0; + /*Semaphore Related*/ + int32_t sem_check = 0; + /*Variable for checking return value*/ + int8_t ret = 0; + + printf("Main Loop Starts\n"); + while(1) + { + /*Gets the Termination request semaphore value. 
If different then 1 Termination was requested*/ + errno = 0; + ret = sem_getvalue(&terminate_req_sem, &sem_check); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to get Semaphore Value: errno=%d\n", errno); + goto err; + } + /*Checks the semaphore value*/ + if (1 != sem_check) + { + goto main_proc_end; + } + /*Wait for 1 TICK.*/ + usleep(WAIT_TIME); + } + +/*Error Processing*/ +err: + sem_trywait(&terminate_req_sem); + main_ret = 1; + goto main_proc_end; +/*Main Processing Termination*/ +main_proc_end: + printf("Main Process Terminated\n"); + return main_ret; +} + +/***************************************** +* Function Name : get_drpai_start_addr +* Description : Function to get the start address of DRPAImem. +* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : If non-zero, DRP-AI memory start address. +* 0 is failure. +******************************************/ +uint64_t get_drpai_start_addr(int drpai_fd) +{ + int ret = 0; + drpai_data_t drpai_data; + + errno = 0; + + /* Get DRP-AI Memory Area Address via DRP-AI Driver */ + ret = ioctl(drpai_fd , DRPAI_GET_DRPAI_AREA, &drpai_data); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to get DRP-AI Memory Area : errno=%d\n", errno); + return 0; + } + + return drpai_data.address; +} + +/***************************************** +* Function Name : set_drpai_freq +* Description : Function to set the DRP and DRP-AI frequency. 
+* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : 0 if succeeded +* not 0 otherwise +******************************************/ +int set_drpai_freq(int drpai_fd) +{ + int ret = 0; + uint32_t data; + + errno = 0; + data = drp_max_freq; + ret = ioctl(drpai_fd , DRPAI_SET_DRP_MAX_FREQ, &data); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to set DRP Max Frequency : errno=%d\n", errno); + return -1; + } + + errno = 0; + data = drpai_freq; + ret = ioctl(drpai_fd , DRPAI_SET_DRPAI_FREQ, &data); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to set DRP-AI Frequency : errno=%d\n", errno); + return -1; + } + return 0; +} + +/***************************************** +* Function Name : init_drpai +* Description : Function to initialize DRP-AI. +* Arguments : drpai_fd: DRP-AI file descriptor +* Return value : If non-zero, DRP-AI memory start address. +* 0 is failure. +******************************************/ +uint64_t init_drpai(int drpai_fd) +{ + int ret = 0; + uint64_t drpai_addr = 0; + + /*Get DRP-AI memory start address*/ + drpai_addr = get_drpai_start_addr(drpai_fd); + + if (drpai_addr == 0) + { + return 0; + } + + /*Set DRP-AI frequency*/ + ret = set_drpai_freq(drpai_fd); + if (ret != 0) + { + return 0; + } + + return drpai_addr; +} + +int32_t main(int32_t argc, char * argv[]) +{ + int8_t main_proc = 0; + int8_t ret = 0; + int8_t ret_main = 0; + /*Multithreading Variables*/ + int32_t create_thread_ai = -1; + int32_t create_thread_key = -1; + int32_t create_thread_capture = -1; + int32_t create_thread_img = -1; + int32_t create_thread_hdmi = -1; + int32_t sem_create = -1; + + InOutDataType input_data_type; + bool runtime_status = false; + int drpai_fd; + + std::string media_port = query_device_status("usb"); + gstreamer_pipeline = "v4l2src device=" + media_port +" ! video/x-raw, width="+std::to_string(CAM_IMAGE_WIDTH)+", height="+std::to_string(CAM_IMAGE_HEIGHT)+" ,framerate=30/1 ! videoconvert ! 
appsink -v"; + + /*Disable OpenCV Accelerator due to the use of multithreading */ + unsigned long OCA_list[16]; + for (int i=0; i < 16; i++) OCA_list[i] = 0; + OCA_Activate( &OCA_list[0] ); + + printf("RZ/V2H AI SDK Sample Application\n"); + printf("Model : Darknet YOLOv3 | %s\n", model_dir.c_str()); + printf("Input : %s\n", INPUT_CAM_NAME); + + /* DRP-AI Frequency Setting */ + /* Usually, users can use default values. */ + if (2 <= argc) + { + drp_max_freq = atoi(argv[1]); + printf("Argument : = %d\n", drp_max_freq); + } + else + { + drp_max_freq = DRP_MAX_FREQ; + } + if (3 <= argc) + { + drpai_freq = atoi(argv[2]); + printf("Argument : = %d\n", drpai_freq); + } + else + { + drpai_freq = DRPAI_FREQ; + } + + uint64_t drpaimem_addr_start = 0; + + /*Load Label from label_list file*/ + label_file_map = load_label_file(label_list); + if (label_file_map.empty()) + { + fprintf(stderr,"[ERROR] Failed to load label file: %s\n", label_list.c_str()); + ret_main = -1; + goto end_main; + } + + /*DRP-AI Driver initialization*/ + errno = 0; + drpai_fd = open("/dev/drpai0", O_RDWR); + if (0 > drpai_fd) + { + fprintf(stderr, "[ERROR] Failed to open DRP-AI Driver : errno=%d\n", errno); + ret_main = -1; + goto end_main; + } + /*Get DRP-AI memory area start address*/ + drpaimem_addr_start = init_drpai(drpai_fd); + if ((uint64_t)NULL == drpaimem_addr_start) + { + fprintf(stderr, "[ERROR] Failed to get DRP-AI memory area start address.\n"); + goto end_close_drpai; + } + + /*Load pre_dir object to DRP-AI */ + ret = preruntime.Load(pre_dir); + if (0 < ret) + { + fprintf(stderr, "[ERROR] Failed to run Pre-processing Runtime Load().\n"); + ret_main = -1; + goto end_close_drpai; + } + + /*Load model_dir for DRP-AI inference */ + runtime_status = runtime.LoadModel(model_dir, drpaimem_addr_start); + + if(!runtime_status) + { + fprintf(stderr, "[ERROR] Failed to load model.\n"); + goto end_close_drpai; + } + + /*Get input data */ + input_data_type = runtime.GetInputDataType(0); + if 
(InOutDataType::FLOAT32 == input_data_type) + { + /*Do nothing*/ + } + else if (InOutDataType::FLOAT16 == input_data_type) + { + fprintf(stderr, "[ERROR] Input data type : FP16.\n"); + /*If your model input data type is FP16, use std::vector for reading input data. */ + goto end_close_drpai; + } + else + { + fprintf(stderr, "[ERROR] Input data type : neither FP32 nor FP16.\n"); + goto end_close_drpai; + } + + /*Initialize buffer for DRP-AI Pre-processing Runtime. */ + drpai_buf = (dma_buffer*)malloc(sizeof(dma_buffer)); + ret = buffer_alloc_dmabuf(drpai_buf,CAM_IMAGE_WIDTH*CAM_IMAGE_WIDTH*CAM_IMAGE_CHANNEL_BGR); + if (-1 == ret) + { + fprintf(stderr, "[ERROR] Failed to Allocate DMA buffer for the drpai_buf\n"); + goto end_free_malloc; + } + + /*Initialize Image object.*/ + ret = img.init(CAM_IMAGE_WIDTH, CAM_IMAGE_HEIGHT, CAM_IMAGE_CHANNEL_BGR, + IMAGE_OUTPUT_WIDTH, IMAGE_OUTPUT_HEIGHT, IMAGE_OUTPUT_CHANNEL_BGRA); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to initialize Image object.\n"); + ret_main = ret; + goto end_close_dmabuf; + } + + /*Termination Request Semaphore Initialization*/ + /*Initialized value at 1.*/ + sem_create = sem_init(&terminate_req_sem, 0, 1); + if (0 != sem_create) + { + fprintf(stderr, "[ERROR] Failed to Initialize Termination Request Semaphore.\n"); + ret_main = -1; + goto end_threads; + } + + /*Create Key Hit Thread*/ + create_thread_key = pthread_create(&kbhit_thread, NULL, R_Kbhit_Thread, NULL); + if (0 != create_thread_key) + { + fprintf(stderr, "[ERROR] Failed to create Key Hit Thread.\n"); + ret_main = -1; + goto end_threads; + } + /*Create Inference Thread*/ + create_thread_ai = pthread_create(&ai_inf_thread, NULL, R_Inf_Thread, NULL); + if (0 != create_thread_ai) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create AI Inference Thread.\n"); + ret_main = -1; + goto end_threads; + } + /*Create Capture Thread*/ + create_thread_capture = pthread_create(&capture_thread, NULL, R_Capture_Thread, 
NULL); + if (0 != create_thread_capture) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create Capture Thread.\n"); + ret_main = -1; + goto end_threads; + } + /*Create Image Thread*/ + create_thread_img = pthread_create(&img_thread, NULL, R_Img_Thread, NULL); + if(0 != create_thread_img) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create Image Thread.\n"); + ret_main = -1; + goto end_threads; + } + + /*Create Display Thread*/ + create_thread_hdmi = pthread_create(&hdmi_thread, NULL, R_Display_Thread, NULL); + if(0 != create_thread_hdmi) + { + sem_trywait(&terminate_req_sem); + fprintf(stderr, "[ERROR] Failed to create Display Thread.\n"); + ret_main = -1; + goto end_threads; + } + /*Main Processing*/ + main_proc = R_Main_Process(); + if (0 != main_proc) + { + fprintf(stderr, "[ERROR] Error during Main Process\n"); + ret_main = -1; + } + goto end_threads; + +end_threads: + if (0 == create_thread_hdmi) + { + ret = wait_join(&hdmi_thread, DISPLAY_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Display Thread on time.\n"); + ret_main = -1; + } + } + if (0 == create_thread_img) + { + ret = wait_join(&img_thread, IMAGE_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Image Thread on time.\n"); + ret_main = -1; + } + } + if (0 == create_thread_capture) + { + ret = wait_join(&capture_thread, CAPTURE_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Capture Thread on time.\n"); + ret_main = -1; + } + } + if (0 == create_thread_ai) + { + ret = wait_join(&ai_inf_thread, AI_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit AI Inference Thread on time.\n"); + ret_main = -1; + } + } + if (0 == create_thread_key) + { + ret = wait_join(&kbhit_thread, KEY_THREAD_TIMEOUT); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to exit Key Hit Thread on time.\n"); + ret_main = -1; + } + } + + /*Delete Terminate 
Request Semaphore.*/ + if (0 == sem_create) + { + sem_destroy(&terminate_req_sem); + } + /* Exit waylad */ + wayland.exit(); + + goto end_close_dmabuf; + +end_close_dmabuf: + buffer_free_dmabuf(drpai_buf); + goto end_free_malloc; + +end_free_malloc: + free(drpai_buf); + drpai_buf = NULL; + + goto end_close_drpai; + +end_close_drpai: + /*Close DRP-AI Driver.*/ + if (0 < drpai_fd) + { + errno = 0; + ret = close(drpai_fd); + if (0 != ret) + { + fprintf(stderr, "[ERROR] Failed to close DRP-AI Driver: errno=%d\n", errno); + ret_main = -1; + } + } + goto end_main; + +end_main: + printf("Application End\n"); + return ret_main; +} diff --git a/R01_object_detection/src_v2h/toolchain/runtime.cmake b/R01_object_detection/src_v2h/toolchain/runtime.cmake new file mode 100755 index 0000000..0ad3cfc --- /dev/null +++ b/R01_object_detection/src_v2h/toolchain/runtime.cmake @@ -0,0 +1,14 @@ +set(CMAKE_SYSTEM_NAME Linux) +set(CMAKE_SYSTEM_PROCESSOR aarch64) +set(MERA_DRP_RUNTIME ON) +set(DCMAKE_SYSTEM_VERSION 1) + +set(CMAKE_SYSROOT $ENV{SDK}/sysroots/aarch64-poky-linux) +set(CMAKE_FIND_ROOT_PATH $ENV{SDK}/sysroots/aarch64-poky-linux/usr/include/gnu) +set(CMAKE_CXX_COMPILER $ENV{SDK}/sysroots/x86_64-pokysdk-linux/usr/bin/aarch64-poky-linux/aarch64-poky-linux-g++) +set(CMAKE_C_COMPILER $ENV{SDK}/sysroots/x86_64-pokysdk-linux/usr/bin/aarch64-poky-linux/aarch64-poky-linux-gcc) + +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) diff --git a/R01_object_detection/src_v2h/wayland.cpp b/R01_object_detection/src_v2h/wayland.cpp new file mode 100755 index 0000000..8e2f514 --- /dev/null +++ b/R01_object_detection/src_v2h/wayland.cpp @@ -0,0 +1,495 @@ +/*********************************************************************************************************************** + * Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. 
+ ***********************************************************************************************************************/ +/*********************************************************************************************************************** + * File Name : wayland.cpp +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +/***************************************** + * Includes + ******************************************/ +#include "define.h" +#include "wayland.h" +#include +#include +#include +#include +#include +#include +#include +#include + + +struct WaylandGlobals { + struct wl_compositor* compositor; + struct wl_shell* shell; +}; + +/***************************************** + * Function Name : registry_global + * Description : wl_registry_listener callback + * wayland func bind. + * Arguments : data = The third argument of wl_registry_add_listener() is notified. + * regisry = The first argument of wl_registry_add_listener() is notified. + * name = global object ID is notified. + * interface = interface name is notifed. + * version = interface version is notified. 
+ * Return value : - + ******************************************/ +static void registry_global(void *data, + struct wl_registry *registry, uint32_t id, + const char *interface, uint32_t version) +{ + struct WaylandGlobals* globals = (struct WaylandGlobals*)data; + if (strcmp(interface, "wl_compositor") == 0) { + globals->compositor = (struct wl_compositor*)wl_registry_bind(registry, id, &wl_compositor_interface, 1); + } + else if (strcmp(interface, "wl_shell") == 0) { + globals->shell = (struct wl_shell*)wl_registry_bind(registry, id, &wl_shell_interface, 1); + } +} + +/* registry callback for listener */ +static const struct wl_registry_listener registry_listener = { registry_global, NULL }; + +/***************************************** + * Function Name : shell_surface_ping + * Description : wl_shell_surface_listener callback + * compositer check hungup + * Arguments : data = The third argument of wl_shell_surface_add_listener() is notified. + * shell_surface = The first argument of wl_shell_surface_add_listener() is notified. + * serial = Identification ID is notified. 
+ * Return value : - + ******************************************/ +static void shell_surface_ping(void *data, + struct wl_shell_surface *shell_surface, + uint32_t serial) +{ + wl_shell_surface_pong(shell_surface, serial); +} + +static const struct wl_shell_surface_listener shell_surface_listener = +{ + .ping = shell_surface_ping, +}; + +Wayland::Wayland() +{ +} + +Wayland::~Wayland() +{ +} + +/***************************************** + * Function Name : LoadShader + * Description : Return the loaded and compiled shader + * Arguments : type + * shaderSrc + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +GLuint Wayland::LoadShader(GLenum type, const char* shaderSrc) +{ + GLuint shader = glCreateShader(type); + assert(shader); + + glShaderSource(shader, 1, &shaderSrc, NULL); + glCompileShader(shader); + + GLint compiled; + glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled); + assert(compiled); + + return shader; +} + +/***************************************** + * Function Name : initProgramObject + * Description : Initialize the shaders and return the program object + * Arguments : pShader + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +GLuint Wayland::initProgramObject(SShader* pShader) +{ + const char* vshader = R"( + attribute vec4 position; + attribute vec2 texcoord; + varying vec2 texcoordVarying; + void main() { + gl_Position = position; + texcoordVarying = texcoord; + } + )"; + + const char* fshader = R"( + precision mediump float; + uniform sampler2D texture; + varying vec2 texcoordVarying; + void main() { + highp float r = texture2D(texture, texcoordVarying).b; + highp float g = texture2D(texture, texcoordVarying).g; + highp float b = texture2D(texture, texcoordVarying).r; + highp float a = texture2D(texture, texcoordVarying).a; + gl_FragColor = vec4(r,g,b,a); + } + + )"; + + GLuint vertexShader = LoadShader(GL_VERTEX_SHADER, vshader); + GLuint 
fragmentShader = LoadShader(GL_FRAGMENT_SHADER, fshader); + + GLuint programObject = glCreateProgram(); + assert(programObject); + + glAttachShader(programObject, vertexShader); + glAttachShader(programObject, fragmentShader); + + glLinkProgram(programObject); + + GLint linked; + glGetProgramiv(programObject, GL_LINK_STATUS, &linked); + assert(linked); + + glDeleteShader(fragmentShader); + glDeleteShader(vertexShader); + + pShader->unProgram = programObject; + pShader->nAttrPos = glGetAttribLocation(pShader->unProgram, "position"); + pShader->nAttrColor = glGetAttribLocation(pShader->unProgram, "texcoord"); + return programObject; +} + +/***************************************** + * Function Name : initEGLDisplay + * Description : Configure EGL and return necessary resources + * Arguments : nativeDisplay + * nativeWindow + * eglDisplay + * eglSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initEGLDisplay(EGLNativeDisplayType nativeDisplay, EGLNativeWindowType nativeWindow, EGLDisplay* eglDisplay, EGLSurface* eglSurface) +{ +// int8_t ret = 0; + + EGLint number_of_config; + EGLint config_attribs[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, 8, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, + EGL_NONE + }; + + static const EGLint context_attribs[] = { + EGL_CONTEXT_CLIENT_VERSION, 2, + EGL_NONE + }; + + *eglDisplay = eglGetDisplay(nativeDisplay); + if (*eglDisplay == EGL_NO_DISPLAY) + { + return -1; + } + + EGLBoolean initialized = eglInitialize(*eglDisplay, NULL, NULL); + if (initialized != EGL_TRUE) + { + return -1; + } + + EGLConfig configs[1]; + + EGLBoolean config = eglChooseConfig(*eglDisplay, config_attribs, configs, 1, &number_of_config); + if (config != EGL_TRUE) + { + return -1; + } + + EGLContext eglContext = eglCreateContext(*eglDisplay, configs[0], EGL_NO_CONTEXT, context_attribs); + + *eglSurface = 
eglCreateWindowSurface(*eglDisplay, configs[0], nativeWindow, NULL); + if (*eglSurface == EGL_NO_SURFACE) + { + return -1; + } + + EGLBoolean makeCurrent = eglMakeCurrent(*eglDisplay, *eglSurface, *eglSurface, eglContext); + if (makeCurrent != EGL_TRUE) + { + return -1; + } + return 0; +} + + +/***************************************** + * Function Name : initWaylandDisplay + * Description : Connect to the Wayland display and return the display and the surface + * Arguments : wlDisplay + * wlSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initWaylandDisplay(struct wl_display** wlDisplay, struct wl_surface** wlSurface) +{ +// int8_t ret = 0; + struct WaylandGlobals globals = { 0 }; + + *wlDisplay = wl_display_connect(NULL); + if(*wlDisplay == NULL) + { + return -1; + } + + struct wl_registry* registry = wl_display_get_registry(*wlDisplay); + wl_registry_add_listener(registry, ®istry_listener, (void*)&globals); + + wl_display_dispatch(*wlDisplay); + wl_display_roundtrip(*wlDisplay); + if (globals.compositor == NULL || globals.shell == NULL) + { + return -1; + } + + *wlSurface = wl_compositor_create_surface(globals.compositor); + if (*wlSurface == NULL) + { + return -1; + } + + struct wl_shell_surface* shellSurface = wl_shell_get_shell_surface(globals.shell, *wlSurface); + wl_shell_surface_set_toplevel(shellSurface); + return 0; +} + +/***************************************** + * Function Name : initWindow + * Description : Connect Wayland and make EGL + * Arguments : width + * height + * wlDisplay + * eglDisplay + * eglSurface + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +static int8_t initWindow(GLint width, GLint height, struct wl_display** wlDisplay, EGLDisplay* eglDisplay, EGLSurface* eglSurface) +{ + int8_t ret = 0; + struct wl_surface* wlSurface; + ret = initWaylandDisplay(wlDisplay, &wlSurface); + if (ret != 0) + { + return -1; + } + + 
struct wl_egl_window* wlEglWindow = wl_egl_window_create(wlSurface, width, height); + if (wlEglWindow == NULL) + { + return -1; + } + + ret = initEGLDisplay((EGLNativeDisplayType)*wlDisplay, (EGLNativeWindowType)wlEglWindow, eglDisplay, eglSurface); + if (ret != 0) + { + return -1; + } + return 0; +} + +/***************************************** + * Function Name : init + * Description : wayland client init + * create buffer. + * Arguments : w = width + * h = height + * c = color channel + * overlay = flag for alpha blending + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::init(uint32_t w, uint32_t h, uint32_t c, bool overlay) +{ + int8_t ret = 0; + img_w = w; + img_h = h; + img_c = c; + img_overlay = overlay; + + // Connect Wayland and make EGL + ret = initWindow(w, h, &display, &eglDisplay, &eglSurface); + if (ret != 0) + { + return -1; + } + + //Initialize the shaders and return the program object + GLuint programObject = initProgramObject(&sShader); + if (programObject == 0) + { + return -1; + } + + // Apply program object + glUseProgram(sShader.unProgram); + glGenTextures(2, textures); + + glEnableVertexAttribArray(sShader.nAttrPos); + glEnableVertexAttribArray(sShader.nAttrColor); + + // enable Alpha Blending + if (img_overlay == true){ + glEnable(GL_BLEND); + glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); + } + + glUniform1i(glGetUniformLocation(sShader.unProgram, "texture"), 0); + + return 0; +} + +/***************************************** + * Function Name : exit + * Description : Exit Wayland + * Arguments : - + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::exit() +{ + SShader* pShader = &sShader; + if (pShader) { + glDeleteProgram(pShader->unProgram); + pShader->unProgram = 0; + pShader->nAttrPos = -1; + pShader->nAttrColor = -1; + } + wl_display_disconnect(display); + return 0; +} + + 
+/***************************************** + * Function Name : render + * Description : + * Arguments : pShader + * texID + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::render(SShader* pShader, GLuint texID) +{ + const float vertices[] = { + -1.0f, 1.0f, 0.0f, + -1.0f, -1.0f, 0.0f, + 1.0f, 1.0f, 0.0f, + 1.0f, -1.0f, 0.0f + }; + + const float texcoords[] = { + 0.0f, 0.0f, + 0.0f, 1.0f, + 1.0f, 0.0f, + 1.0f, 1.0f }; + + + glVertexAttribPointer(pShader->nAttrColor, 2, GL_FLOAT, GL_FALSE, 0, texcoords); + glVertexAttribPointer(pShader->nAttrPos, 3, GL_FLOAT, GL_FALSE, 0, vertices); + + // draw texture + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, texID); + //glUniform1i(uniID, texID); + glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); + return 0; +} + + +/***************************************** + * Function Name : setupTexture + * Description : Bind Texture + * Arguments : texID + * src_pixels + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::setupTexture(GLuint texID, uint8_t* src_pixels) +{ + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + glBindTexture(GL_TEXTURE_2D, texID); + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img_w, img_h, 0, GL_RGBA, GL_UNSIGNED_BYTE, src_pixels); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + return 0; +} + + +/***************************************** + * Function Name : commit + * Description : Commit to update the display image + * Arguments : buf_id = buffer id + * Return value : 0 if Success + * not 0 otherwise + ******************************************/ +uint8_t Wayland::commit(uint8_t* cam_buffer, uint8_t* ol_buffer) +{ + uint8_t ret = 0; +#ifdef DEBUG_TIME_FLG + 
using namespace std; + chrono::system_clock::time_point start, end; + double time = 0; + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + // setup texture + setupTexture(textures[0], cam_buffer); + if (ol_buffer != NULL && img_overlay == true) { + setupTexture(textures[1], ol_buffer); + } +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Setup Image Time : %lf[ms]\n", time); +#endif // DEBUG_TIME_FLG + + // clear + glClearColor(0.0f, 0.0f, 0.0f, 1.0f); + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + +#ifdef DEBUG_TIME_FLG + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + // render + render(&sShader, textures[0]); + if (ol_buffer != NULL && img_overlay == true) { + render(&sShader, textures[1]); + } +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Specifies Render Time : %lf[ms]\n", time); + start = chrono::system_clock::now(); +#endif // DEBUG_TIME_FLG + + eglSwapBuffers(eglDisplay, eglSurface); + +#ifdef DEBUG_TIME_FLG + end = chrono::system_clock::now(); + time = static_cast(chrono::duration_cast(end - start).count() / 1000.0); + printf("Update Frame Time : %lf[ms]\n", time); +#endif // DEBUG_TIME_FLG + + return ret; +} + diff --git a/R01_object_detection/src_v2h/wayland.h b/R01_object_detection/src_v2h/wayland.h new file mode 100755 index 0000000..5115337 --- /dev/null +++ b/R01_object_detection/src_v2h/wayland.h @@ -0,0 +1,60 @@ +/*********************************************************************************************************************** +* Copyright (C) 2024 Renesas Electronics Corporation. All rights reserved. 
+***********************************************************************************************************************/ +/*********************************************************************************************************************** +* File Name : wayland.h +* Version : v3.00 +* Description : RZ/V2H AI SDK Sample Application for Object Detection +***********************************************************************************************************************/ + +#ifndef WAYLAND_H +#define WAYLAND_H + +#include "define.h" +#include +#include +#include +#include + +class Wayland +{ + /* structure of Shader settings */ + typedef struct _SShader { + GLuint unProgram; + GLint nAttrPos; + GLint nAttrColor; + } SShader; + + public: + Wayland(); + ~Wayland(); + + uint8_t init(uint32_t w, uint32_t h, uint32_t c, bool overlay = false); + uint8_t exit(); + uint8_t commit(uint8_t* cam_buffer, uint8_t* ol_buffer); + + struct wl_compositor *compositor = NULL; + struct wl_shm *shm = NULL; + struct wl_shell *shell = NULL; + private: + uint32_t img_h; + uint32_t img_w; + uint32_t img_c; + bool img_overlay; + + struct wl_display *display = NULL; + struct wl_surface *surface; + struct wl_shell_surface *shell_surface; + struct wl_registry *registry = NULL; + EGLDisplay eglDisplay; + EGLSurface eglSurface; + SShader sShader; + GLuint textures[2]; + + GLuint LoadShader(GLenum type, const char* shaderSrc); + GLuint initProgramObject(SShader* pShader); + uint8_t render(SShader* pShader, GLuint texID); + uint8_t setupTexture(GLuint texID, uint8_t* src_pixels); +}; + +#endif diff --git a/README.md b/README.md old mode 100644 new mode 100755 index 475b367..ed43d5a --- a/README.md +++ b/README.md @@ -1,14 +1,19 @@ -# RZ/V2L AI Applications +# RZ/V AI Applications ## Overview -This repository consists of the various sample AI applications that are implemented on the RZ/V2L evaluation board kit.\ +This repository consists of the various functions of AI applications that are 
implemented on the RZ/V evaluation board kit.\ Each application is provided in their respective folder alongside the respective `readme.md` file to operate the application. ## Hardware Requirements and Setup -Users need some additional hardware setup for executing the sample application. -Each hardware requirements are specified in the respective `readme.md` of the sample applications. +Users need some additional hardware setup for executing the application. +Each hardware requirements are specified in the respective `readme.md` of the applications. +### Supported board +- [RZ/V2L Evaluation Board Kit](#rzv2l-evaluation-board-kit) +- [RZ/V2H Evaluation Board Kit](#rzv2h-evaluation-board-kit) + +### RZ/V2L Evaluation Board Kit Here it is specified how to connect those hardware equipment to the RZ/V2L Evaluation Board Kit. ###### Lists of hardware equipments required @@ -54,25 +59,27 @@ The number information is present as: #### Additional Requirements -For building the sample applications +For building the applications - Linux Host PC with more than 100 GB free space is required. - Ubuntu 20.04 LTS - OpenCV 4.x - C++11 or higher +### RZ/V2H Evaluation Board Kit +Please refer to [Getting Started](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started) for the details of RZ/V2H Evaluation Board Kit. ## Startup Guide -The users are mandate to follow the [startup guide](https://renesas-rz.github.io/rzv_ai_sdk/getting_started.html) provided by the Renesas, before building the sample application. +The users are mandate to follow the [Getting Started](https://renesas-rz.github.io/rzv_ai_sdk/latest/getting_started) provided by the Renesas, before building the application. After completion of the startup guide, users are expected to have -- Completed the bootup procedure of RZ/V2L Evaluation board kit -- `rzv2l_ai_sdk_image` docker container running on the host machine. This docker container will contain RZ/V2L sdk and tvm environment. 
- >Note: The docker container is required for building the sample applications. Users can skip this, if they want to use the pre-built binaries. +- Completed the bootup procedure of RZ/V Evaluation board kit +- Docker container running on the host machine. This docker container will contain AI SDK and DRP-AI TVM environment. + >Note: The docker container is required for building the applications. Users can skip this, if they want to use the pre-built binaries. - Necessary Hardware equipments -## Sample Application list -The sample applications already developed for RZ/V2L Evaluation Board Kit are listed as: +## Functions of AI Applications +The applications already developed for RZ/V Evaluation Board Kit are listed as: ### 1. Footfall Counter @@ -119,7 +126,7 @@ The [Suspicious Person Detection](./Q10_suspicious_person_detection) application The [Fish Detection](./Q11_fish_detection) application is a software used to automatically detect fishes in real-time camera streams. -## Operating the sample application -- Each sample application folder have their respective `readme.md` file to modify and run the sample application. +## Operating the application +- Each application folder have their respective `readme.md` file to modify and run the application. ## References \ No newline at end of file