From c80d5381b4d900f973c8f8325c4673a5cfc86f53 Mon Sep 17 00:00:00 2001 From: Jarno Ralli Date: Fri, 6 Sep 2024 09:08:09 +0300 Subject: [PATCH] Improvements to the documentation and code quality (#17) * Improvements to the documentation and code quality * Added status badge * Added README.md for Conda directory --- .flake8 | 3 + .github/workflows/pre-commit.yml | 25 ++ .pre-commit-config.yaml | 27 ++ README.md | 31 +- conda/README.md | 39 +++ ...st_pytorch_gpu.yml => gst-pytorch-gpu.yml} | 3 +- deepstream-examples/README.md | 36 ++- .../deepstream-retinaface/retinaface.py | 26 +- .../gst-tracking-parallel.py | 306 +++++++++++------- .../deepstream-tracking/README.md | 2 +- .../deepstream-tracking/gst-tracking-v2.py | 203 ++++++++---- .../deepstream-tracking/gst-tracking.py | 188 +++++++---- .../deepstream-tracking/tracker_config.yml | 2 +- .../gst-triton-tracking-v2.py | 209 +++++++----- .../gst-triton-tracking.py | 196 +++++++---- .../tracker_config.yml | 2 +- .../gst-nvinfer-rect/CMakeLists.txt | 4 +- .../gst-nvinfer-rect/gstnvinfer.cpp | 2 +- .../gstnvinfer_meta_utils.cpp | 4 +- .../gst-nvinfer-rect/CMakeLists.txt | 2 +- .../gst-nvinfer-rect/gstnvinfer.cpp | 6 +- .../gst-nvinfer-rect/CMakeLists.txt | 2 +- .../gst-nvinfer-rect/gstnvinfer.cpp | 6 +- .../nvdsparse_retinaface.cpp | 10 +- deepstream-examples/src/utils/CMakeLists.txt | 2 +- .../src/utils/nvbufsurf_tools.cpp | 2 +- docker/Dockerfile-deepstream | 2 +- docker/Dockerfile-deepstream-6.0.1-devel | 2 +- docker/README.md | 50 +++ gst-examples/README.md | 13 +- gst-examples/gst-pytorch-example-1.1.py | 86 +++-- gst-examples/gst-pytorch-example-1.py | 86 +++-- gst-examples/gst-qtdemux-h264-avdec_aac.py | 50 +-- gst-examples/gst-qtdemux-h264.py | 32 +- hailo-examples/README.md | 4 +- hailo-examples/tracking/README.md | 2 +- helper-package/src/helpers/__init__.py | 2 +- helper-package/src/helpers/gsthelpers.py | 48 ++- 38 files changed, 1140 insertions(+), 575 deletions(-) create mode 100644 .flake8 create mode 100644 .github/workflows/pre-commit.yml create mode 100644 .pre-commit-config.yaml create mode 100644 conda/README.md rename conda/{gst_pytorch_gpu.yml => gst-pytorch-gpu.yml} (90%) diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..a754688 --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 170 +extend-ignore = E203 diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 0000000..dc6b042 --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,25 @@ +name: pre-commit + +on: + pull_request: + branches: [main] + push: + branches: [main] + workflow_dispatch: + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: install pre-commit + run: | + pip install --upgrade pip + pip install pre-commit jupyter + pre-commit install + - name: run pre-commit hooks + run: | + pre-commit run --color=always --all-files diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..02eec75 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,27 @@ +repos: + - repo: local + hooks: + - id: jupyter-nb-clear-output + name: jupyter-nb-clear-output + files: \.ipynb$ + stages: [commit] + language: system + entry: jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace + + - repo: https://github.com/ambv/black + rev: 22.12.0 + hooks: + - id: black + language_version: python3 + files: \.py$ + + - repo: 
https://github.com/pycqa/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + files: \.py$ + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace diff --git a/README.md b/README.md index 7d4268b..73fad11 100644 --- a/README.md +++ b/README.md @@ -1,24 +1,33 @@ +[![pre-commit](https://github.com/JarnoRalli/gstreamer-examples/actions/workflows/pre-commit.yml/badge.svg?branch=main&event=push)](https://github.com/JarnoRalli/gstreamer-examples/actions/workflows/pre-commit.yml) + # GSTREAMER-EXAMPLES -This repository contains both GStreamer and Deepstream related examples in Python. Directories are as follows: +This repository contains examples related to GStreamer, Deepstream and Hailo. Some of the examples are written in Python +and some of them are written in C/C++. + +# 1 Contents + +Directories are as follows: * [helper-package](helper-package/README.md). A package that contains helper functions and classes. * [deepstream-examples](deepstream-examples/README.md). Deepstream related examples. * [hailo-examples](hailo-examples/README.md). Hailo related examples. * [gst-examples](gst-examples/README.md). Gst-examples. * [docker](docker/README.md). Docker files for generating containers. +* [conda](conda/README.md). Conda virtual environments. Paul Bridger has excellent tutorials regarding how to speed up inference. For anyone interested in the subject, -I recommend to take a look at: +I recommend you to take a look at: * https://paulbridger.com/posts/video-analytics-pytorch-pipeline/ * https://paulbridger.com/posts/video-analytics-pipeline-tuning/ -## Helper-Package +# 2 Helper-Package -Helpers is a Python package that contains some helper routines for creating gst-pipelines. Most of the examples, if not all, -use modules from this package, so it needs to be available to Python. Easiest way to make this accessible is to install it as follows. +Helpers is a Python package that contains some helper routines for creating gst-pipelines. Most of the examples, if not all, +use modules from this package, so it needs to be available to Python. The Docker images in the directory [docker](./docker/README.md) install +this package automatically. Easiest way to make this accessible is to install it as follows. -Make sure that you have the latest version of PyPA's build installed: +Make sure that you have the latest version of the `build` package installed using the following command: ```bash python3 -m pip install --upgrade build @@ -31,7 +40,7 @@ cd helper-package python3 -m build ``` -Above command creates a new directory called `dist` where the package can be found. In order to install the created package, +Above command creates a new directory called `dist` where the package can be found. In order to install the created package, run the following command from the `dist` directory: ```bash @@ -40,15 +49,15 @@ pip3 install ./helpers-0.0.1-py3-none-any.whl Replace `helpers-0.0.1-py3-none-any.whl` with the actual name/path of the whl-file that was created. 
-### Usage +## 2.1 Usage Once you have installed the `helpers` package, you can use is as follows: -```bash -from helpers import * +```python +from helpers import gsthelpers ``` -### Python Packages and Modules +## 2.2 Python Packages and Modules For more information regarding Python packagaging etc., take a look at: diff --git a/conda/README.md b/conda/README.md new file mode 100644 index 0000000..d85a585 --- /dev/null +++ b/conda/README.md @@ -0,0 +1,39 @@ +# Conda Virtual Environments + +If you use conda for managing Python virtual environments, first you need to install either [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/) or [Anaconda](https://docs.anaconda.com/free/anaconda/install/index.html). + +# 1 Libmamba Solver + +Conda's own solver is very slow, so I recommend using `Libmamba`. To use the new solver, first update conda in the base environment (optional step): + +```bash +conda update -n base conda +``` + +Then install and activate `Libmamba` as the solver: + +```bash +conda install -n base conda-libmamba-solver +conda config --set solver libmamba +``` + +# 2 Environments + +Following YAML configuration files for Conda environments are available: + +* [gst-pytorch-gpu.yml](./gst-pytorch-gpu.yml) + * **Environment name:** gst-pytorch-gpu + * **Contains:** python 3.9, pytorch, pytorch-cuda=11.6, gstreamerm, matplotlib, numpy + +You can create a new virtual environment as follows: + +```bash +conda env create -f +``` + +Once the environment has been created, you can activate it by executing the following command: + +```bash +conda activate +``` + diff --git a/conda/gst_pytorch_gpu.yml b/conda/gst-pytorch-gpu.yml similarity index 90% rename from conda/gst_pytorch_gpu.yml rename to conda/gst-pytorch-gpu.yml index ab2a608..c0ac71b 100644 --- a/conda/gst_pytorch_gpu.yml +++ b/conda/gst-pytorch-gpu.yml @@ -1,4 +1,4 @@ -name: gst-pytorch +name: gst-pytorch-gpu channels: - menpo - conda-forge @@ -14,6 +14,7 @@ dependencies: - gst-plugins-base - gst-plugins-good - gst-plugins-bad + - gst-libav - gstreamer - gst-python - pip diff --git a/deepstream-examples/README.md b/deepstream-examples/README.md index 47034fe..2c7f928 100644 --- a/deepstream-examples/README.md +++ b/deepstream-examples/README.md @@ -1,6 +1,11 @@ # DEEPSTREAM EXAMPLES This directory contains DeepStream related examples. Example code, along with configuration files etc., are placed inside sub-directories. +Before running the examples, it is a good idea to refresh the GStreamer plugin cache by running the following: + +```bash +gst-inspect-1.0 +``` --- @@ -76,7 +81,7 @@ Related directories: # 3 Activating Nvidia GPU -Before executing any of the examples, you need to install Nvidia driver. However, some systems have several graphics +Before executing any of the examples, you need to install Nvidia driver. However, some systems have several graphics cards, i.e. you might have both an Nvidia GPU and an Intel integrated graphics controller. You can verify this by running the following command: @@ -113,7 +118,7 @@ sudo prime-select --- -# 3 Running the Examples Using Docker +# 4 Running the Examples Using Docker This is the preferred way to run the tests. Before creating the docker image, you need to install Nvidia's Container Toolkit. 
Instructions can be found here: @@ -139,7 +144,7 @@ You should see output following (or similar) output: | 32% 38C P0 34W / 151W | 735MiB / 8192MiB | 0% Default | | | | N/A | +-------------------------------+----------------------+----------------------+ - + +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | @@ -149,7 +154,7 @@ You should see output following (or similar) output: ``` -## 3.1 Create the Docker Image +## 4.1 Create the Docker Image After this you can create the docker image used in the examples. @@ -158,7 +163,7 @@ cd gstreamer-examples/docker docker build -t nvidia-deepstream-samples -f ./Dockerfile-deepstream . ``` -## 3.2 Test the Docker Image +## 4.2 Test the Docker Image Some of the examples use GStreamer plugin `nveglglessink` for showing the results in realtime. `nveglglessink` depends on OpenGL, so making sure that OpenGL works inside the container is essential. Make sure that `DISPLAY` @@ -222,7 +227,7 @@ glmark2 A window should pop-up, displaying a horse. -## 3.3 Execute the Examples +## 4.3 Execute the Examples Run the following, from the `gstreamer-examples` directory, in order to start the docker container in interactive mode and run one of the examples: @@ -240,9 +245,12 @@ cd /home/gstreamer-examples/deepstream-examples/deepstream-tracking python3 gst-tracking.py -i /opt/nvidia/deepstream/deepstream-6.1/samples/streams/sample_1080p_h264.mp4 ``` +When starting the Docker container with the above command, the switch `-v $(pwd):/home/gstreamer-examples` maps the local directory `$(pwd)` +to a directory `/home/gstreamer-examples` inside the container. + --- -# 4 Running the Examples Without Docker +# 5 Running the Examples Without Docker If you're not using Docker to run the examples, you need to install DeepStream, and Triton Inference Server if you are planning on executing Triton related examples as well, in the host system. Due to the complexity of Nvidia's libraries, depending on the system your're using, @@ -251,9 +259,9 @@ are for: * Ubuntu 20.04 -## 4.1 Install DeepStream SDK +## 5.1 Install DeepStream SDK -Follow these instructions for installing the DeepStream SDK +Follow these instructions for installing the DeepStream SDK [https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Quickstart.html](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Quickstart.html). After installation, verify that `nvinfer` plug-in can be found @@ -270,7 +278,7 @@ sudo /opt/nvidia/deepstream/deepstream/install.sh And then reboot. -## 4.2 Install DeepStream Python Bindings +## 5.2 Install DeepStream Python Bindings Information regarding DeepStream Python bindings can be found from here [https://github.com/NVIDIA-AI-IOT/deepstream_python_apps](https://github.com/NVIDIA-AI-IOT/deepstream_python_apps). You can download ready to install packages from here [https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/releases](https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/releases). @@ -283,7 +291,7 @@ pip3 install pyds-1.1.4-py3-none-linux_x86_64.whl Replace `pyds-1.1.4-py3-none-linux_x86_64.whl` with the version that you downloaded. -## 4.3 Install Triton Inference Server +## 5.3 Install Triton Inference Server Before executing those examples that use Triton, you first need to install it locally. 
First install the following package(s): @@ -309,7 +317,7 @@ cd build/install sudo cp -vr ./backends /opt/tritonserver ``` -## 4.4 Set Environment Variables +## 5.4 Set Environment Variables Triton libraries need to be discoverable by the the dynamic library loader: @@ -337,7 +345,7 @@ If it cannot be found, but it is installed, you can add it to path: export PATH=${PATH}:/usr/src/tensorrt/bin/ ``` -## 4.5 Build the Model Repo +## 5.5 Build the Model Repo We will use the models shipped with the DeepStream SDK. However, first make sure that `trtexec` is found: @@ -353,7 +361,7 @@ cd /opt/nvidia/deepstream/deepstream/samples ``` -## 4.6 Testing Triton Installation +## 5.6 Testing Triton Installation Test that the `nvinferenceserver` plugin can be found diff --git a/deepstream-examples/deepstream-retinaface/retinaface.py b/deepstream-examples/deepstream-retinaface/retinaface.py index 2f51861..0c8e5e1 100644 --- a/deepstream-examples/deepstream-retinaface/retinaface.py +++ b/deepstream-examples/deepstream-retinaface/retinaface.py @@ -7,22 +7,10 @@ import sys import gi -import numpy as np import argparse -import contextlib -import time -from functools import partial -gi.require_version('Gst', '1.0') -from gi.repository import Gst - -#@contextlib.contextmanager -#def nvtx_range(msg): -# depth = torch.cuda.nvtx.range_push(msg) -# try: -# yield depth -# finally: -# torch.cuda.nvtx.range_pop() +gi.require_version("Gst", "1.0") +from gi.repository import Gst # noqa: E402 if __name__ == "__main__": @@ -35,7 +23,7 @@ if args.input_file == "": sys.exit("No input file has been given!") - pipeline_definition = f''' + pipeline_definition = f""" filesrc location={args.input_file} ! qtdemux ! queue ! @@ -46,7 +34,7 @@ nvvideoconvert ! nvdsosd ! queue ! - nveglglessink''' + nveglglessink""" print("--- PIPELINE DEFINITION ---") print(pipeline_definition) @@ -57,9 +45,11 @@ try: while True: - msg = pipeline.get_bus().timed_pop_filtered(Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR) + msg = pipeline.get_bus().timed_pop_filtered( + Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR + ) if msg: - text = msg.get_structure().to_string() if msg.get_structure() else '' + text = msg.get_structure().to_string() if msg.get_structure() else "" msg_type = Gst.message_type_get_name(msg.type) print(f"{msg.src.name}: [{msg.type}] {text}") break diff --git a/deepstream-examples/deepstream-tracking-parallel/gst-tracking-parallel.py b/deepstream-examples/deepstream-tracking-parallel/gst-tracking-parallel.py index d12fe02..d9267e2 100644 --- a/deepstream-examples/deepstream-tracking-parallel/gst-tracking-parallel.py +++ b/deepstream-examples/deepstream-tracking-parallel/gst-tracking-parallel.py @@ -33,12 +33,11 @@ import sys import signal import pyds -from helpers import * - +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 PGIE_CLASS_ID_VEHICLE = 0 PGIE_CLASS_ID_BICYCLE = 1 @@ -46,8 +45,11 @@ PGIE_CLASS_ID_ROADSIGN = 3 past_tracking_meta = [0] -MetaObject = namedtuple("MetaObject", ["left", "top", "height", "width", "area", "bottom", "id", "text", "class_id"]) -ColorObject = namedtuple('ColorObject', ['red', 'green', 'blue', 'alpha']) +MetaObject = namedtuple( + "MetaObject", + ["left", "top", "height", "width", "area", "bottom", "id", "text", "class_id"], +) +ColorObject = namedtuple("ColorObject", ["red", "green", "blue", "alpha"]) ColorList = { 
PGIE_CLASS_ID_VEHICLE: ColorObject(red=1.0, green=0.0, blue=0.0, alpha=1.0), @@ -59,7 +61,12 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): frame_number = 0 - obj_counter = {PGIE_CLASS_ID_VEHICLE: 0, PGIE_CLASS_ID_PERSON: 0, PGIE_CLASS_ID_BICYCLE: 0, PGIE_CLASS_ID_ROADSIGN: 0} + obj_counter = { + PGIE_CLASS_ID_VEHICLE: 0, + PGIE_CLASS_ID_PERSON: 0, + PGIE_CLASS_ID_BICYCLE: 0, + PGIE_CLASS_ID_ROADSIGN: 0, + } num_rects = 0 gst_buffer = info.get_buffer() if not gst_buffer: @@ -93,8 +100,10 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): top=obj_meta.tracker_bbox_info.org_bbox_coords.top, height=obj_meta.tracker_bbox_info.org_bbox_coords.height, width=obj_meta.tracker_bbox_info.org_bbox_coords.width, - area=obj_meta.tracker_bbox_info.org_bbox_coords.height * obj_meta.tracker_bbox_info.org_bbox_coords.width, - bottom=obj_meta.tracker_bbox_info.org_bbox_coords.top + obj_meta.tracker_bbox_info.org_bbox_coords.height, + area=obj_meta.tracker_bbox_info.org_bbox_coords.height + * obj_meta.tracker_bbox_info.org_bbox_coords.width, + bottom=obj_meta.tracker_bbox_info.org_bbox_coords.top + + obj_meta.tracker_bbox_info.org_bbox_coords.height, id=obj_meta.object_id, text=f"ID: {obj_meta.object_id:04d}, Class: {pyds.get_string(obj_meta.text_params.display_text)}", class_id=obj_meta.class_id, @@ -149,14 +158,24 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): if x < 0 or y < 0: continue - display_meta.text_params[display_meta.num_labels].display_text = meta_list_sorted[idx].text + display_meta.text_params[ + display_meta.num_labels + ].display_text = meta_list_sorted[idx].text display_meta.text_params[display_meta.num_labels].x_offset = x display_meta.text_params[display_meta.num_labels].y_offset = y - display_meta.text_params[display_meta.num_labels].font_params.font_name = "Serif" - display_meta.text_params[display_meta.num_labels].font_params.font_size = 10 - display_meta.text_params[display_meta.num_labels].font_params.font_color.set(1.0, 1.0, 1.0, 1.0) + display_meta.text_params[ + display_meta.num_labels + ].font_params.font_name = "Serif" + display_meta.text_params[ + display_meta.num_labels + ].font_params.font_size = 10 + display_meta.text_params[ + display_meta.num_labels + ].font_params.font_color.set(1.0, 1.0, 1.0, 1.0) display_meta.text_params[display_meta.num_labels].set_bg_clr = 1 - display_meta.text_params[display_meta.num_labels].text_bg_clr.set(0.45, 0.20, 0.50, 0.75) + display_meta.text_params[display_meta.num_labels].text_bg_clr.set( + 0.45, 0.20, 0.50, 0.75 + ) display_meta.num_labels += 1 display_meta.num_rects = end_idx - start_idx @@ -192,9 +211,15 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): user_meta = pyds.NvDsUserMeta.cast(l_user.data) except StopIteration: break - if user_meta and user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META: + if ( + user_meta + and user_meta.base_meta.meta_type + == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META + ): try: - pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data) + pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast( + user_meta.user_meta_data + ) except StopIteration: break for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch): @@ -253,27 +278,51 @@ def __init__(self, dump_dot_file=False): self.stream_muxer_2 = gsthelpers.create_element("nvstreammux", "stream-muxer-2") self.tee = gsthelpers.create_element("tee", "tee") # Processing branch 1 - self.primary_inference_1 = gsthelpers.create_element("nvinfer", "primary-1-inference") - 
self.secondary1_inference_1 = gsthelpers.create_element("nvinfer", "secondary1-inference-1") - self.secondary2_inference_1 = gsthelpers.create_element("nvinfer", "secondary2-inference-1") - self.secondary3_inference_1 = gsthelpers.create_element("nvinfer", "secondary3-inference-1") + self.primary_inference_1 = gsthelpers.create_element( + "nvinfer", "primary-1-inference" + ) + self.secondary1_inference_1 = gsthelpers.create_element( + "nvinfer", "secondary1-inference-1" + ) + self.secondary2_inference_1 = gsthelpers.create_element( + "nvinfer", "secondary2-inference-1" + ) + self.secondary3_inference_1 = gsthelpers.create_element( + "nvinfer", "secondary3-inference-1" + ) self.tracker_1 = gsthelpers.create_element("nvtracker", "tracker-1") # Processing branch 2 - self.primary_inference_2 = gsthelpers.create_element("nvinfer", "primary-2-inference") - self.secondary1_inference_2 = gsthelpers.create_element("nvinfer", "secondary1-inference-2") - self.secondary2_inference_2 = gsthelpers.create_element("nvinfer", "secondary2-inference-2") - self.secondary3_inference_2 = gsthelpers.create_element("nvinfer", "secondary3-inference-2") + self.primary_inference_2 = gsthelpers.create_element( + "nvinfer", "primary-2-inference" + ) + self.secondary1_inference_2 = gsthelpers.create_element( + "nvinfer", "secondary1-inference-2" + ) + self.secondary2_inference_2 = gsthelpers.create_element( + "nvinfer", "secondary2-inference-2" + ) + self.secondary3_inference_2 = gsthelpers.create_element( + "nvinfer", "secondary3-inference-2" + ) self.tracker_2 = gsthelpers.create_element("nvtracker", "tracker-2") # Video sink branch 1 - self.video_converter_1 = gsthelpers.create_element("nvvideoconvert", "video-converter-1") + self.video_converter_1 = gsthelpers.create_element( + "nvvideoconvert", "video-converter-1" + ) self.osd_1 = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw-1") self.videosink_queue_1 = gsthelpers.create_element("queue", "videosink-queue-1") - self.video_sink_1 = gsthelpers.create_element("nveglglessink", "nvvideo-renderer-1") + self.video_sink_1 = gsthelpers.create_element( + "nveglglessink", "nvvideo-renderer-1" + ) # Video sink branch 2 - self.video_converter_2 = gsthelpers.create_element("nvvideoconvert", "video-converter-2") + self.video_converter_2 = gsthelpers.create_element( + "nvvideoconvert", "video-converter-2" + ) self.osd_2 = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw-2") self.videosink_queue_2 = gsthelpers.create_element("queue", "videosink-queue-2") - self.video_sink_2 = gsthelpers.create_element("nveglglessink", "nvvideo-renderer-2") + self.video_sink_2 = gsthelpers.create_element( + "nveglglessink", "nvvideo-renderer-2" + ) # Processing queues self.queue_1 = gsthelpers.create_element("queue", "queue-1") @@ -324,74 +373,94 @@ def __init__(self, dump_dot_file=False): # Set properties for the inference engines in processing branch 1 self.primary_inference_1.set_property("config-file-path", "pgie_config_1.txt") - self.secondary1_inference_1.set_property("config-file-path", "sgie1_config_1.txt") - self.secondary2_inference_1.set_property("config-file-path", "sgie2_config_1.txt") - self.secondary3_inference_1.set_property("config-file-path", "sgie3_config_1.txt") + self.secondary1_inference_1.set_property( + "config-file-path", "sgie1_config_1.txt" + ) + self.secondary2_inference_1.set_property( + "config-file-path", "sgie2_config_1.txt" + ) + self.secondary3_inference_1.set_property( + "config-file-path", "sgie3_config_1.txt" + ) # Set properties 
for the inference engines in processing branch 2 self.primary_inference_2.set_property("config-file-path", "pgie_config_2.txt") - self.secondary1_inference_2.set_property("config-file-path", "sgie1_config_2.txt") - self.secondary2_inference_2.set_property("config-file-path", "sgie2_config_2.txt") - self.secondary3_inference_2.set_property("config-file-path", "sgie3_config_2.txt") + self.secondary1_inference_2.set_property( + "config-file-path", "sgie1_config_2.txt" + ) + self.secondary2_inference_2.set_property( + "config-file-path", "sgie2_config_2.txt" + ) + self.secondary3_inference_2.set_property( + "config-file-path", "sgie3_config_2.txt" + ) # Set properties for the tracker_1 tracker_config = configparser.ConfigParser() tracker_config.read("tracker_config_1.txt") tracker_config.sections() - for key in tracker_config['tracker']: - if key == 'tracker-width': - tracker_width = tracker_config.getint('tracker', key) - self.tracker_1.set_property('tracker-width', tracker_width) - if key == 'tracker-height': - tracker_height = tracker_config.getint('tracker', key) - self.tracker_1.set_property('tracker-height', tracker_height) - if key == 'gpu-id': - tracker_gpu_id = tracker_config.getint('tracker', key) - self.tracker_1.set_property('gpu_id', tracker_gpu_id) - if key == 'll-lib-file': - tracker_ll_lib_file = tracker_config.get('tracker', key) - self.tracker_1.set_property('ll-lib-file', tracker_ll_lib_file) - if key == 'll-config-file': - tracker_ll_config_file = tracker_config.get('tracker', key) - self.tracker_1.set_property('ll-config-file', tracker_ll_config_file) - if key == 'enable-batch-process': - tracker_enable_batch_process = tracker_config.getint('tracker', key) - self.tracker_1.set_property('enable_batch_process', tracker_enable_batch_process) - if key == 'enable-past-frame': - tracker_enable_past_frame = tracker_config.getint('tracker', key) - self.tracker_1.set_property('enable_past_frame', tracker_enable_past_frame) + for key in tracker_config["tracker"]: + if key == "tracker-width": + tracker_width = tracker_config.getint("tracker", key) + self.tracker_1.set_property("tracker-width", tracker_width) + if key == "tracker-height": + tracker_height = tracker_config.getint("tracker", key) + self.tracker_1.set_property("tracker-height", tracker_height) + if key == "gpu-id": + tracker_gpu_id = tracker_config.getint("tracker", key) + self.tracker_1.set_property("gpu_id", tracker_gpu_id) + if key == "ll-lib-file": + tracker_ll_lib_file = tracker_config.get("tracker", key) + self.tracker_1.set_property("ll-lib-file", tracker_ll_lib_file) + if key == "ll-config-file": + tracker_ll_config_file = tracker_config.get("tracker", key) + self.tracker_1.set_property("ll-config-file", tracker_ll_config_file) + if key == "enable-batch-process": + tracker_enable_batch_process = tracker_config.getint("tracker", key) + self.tracker_1.set_property( + "enable_batch_process", tracker_enable_batch_process + ) + if key == "enable-past-frame": + tracker_enable_past_frame = tracker_config.getint("tracker", key) + self.tracker_1.set_property( + "enable_past_frame", tracker_enable_past_frame + ) # Set properties for the tracker_2 tracker_config.read("tracker_config_2.txt") tracker_config.sections() - for key in tracker_config['tracker']: - if key == 'tracker-width': - tracker_width = tracker_config.getint('tracker', key) - self.tracker_2.set_property('tracker-width', tracker_width) - if key == 'tracker-height': - tracker_height = tracker_config.getint('tracker', key) - 
self.tracker_2.set_property('tracker-height', tracker_height) - if key == 'gpu-id': - tracker_gpu_id = tracker_config.getint('tracker', key) - self.tracker_2.set_property('gpu_id', tracker_gpu_id) - if key == 'll-lib-file': - tracker_ll_lib_file = tracker_config.get('tracker', key) - self.tracker_2.set_property('ll-lib-file', tracker_ll_lib_file) - if key == 'll-config-file': - tracker_ll_config_file = tracker_config.get('tracker', key) - self.tracker_2.set_property('ll-config-file', tracker_ll_config_file) - if key == 'enable-batch-process': - tracker_enable_batch_process = tracker_config.getint('tracker', key) - self.tracker_2.set_property('enable_batch_process', tracker_enable_batch_process) - if key == 'enable-past-frame': - tracker_enable_past_frame = tracker_config.getint('tracker', key) - self.tracker_2.set_property('enable_past_frame', tracker_enable_past_frame) - - #---------------------- + for key in tracker_config["tracker"]: + if key == "tracker-width": + tracker_width = tracker_config.getint("tracker", key) + self.tracker_2.set_property("tracker-width", tracker_width) + if key == "tracker-height": + tracker_height = tracker_config.getint("tracker", key) + self.tracker_2.set_property("tracker-height", tracker_height) + if key == "gpu-id": + tracker_gpu_id = tracker_config.getint("tracker", key) + self.tracker_2.set_property("gpu_id", tracker_gpu_id) + if key == "ll-lib-file": + tracker_ll_lib_file = tracker_config.get("tracker", key) + self.tracker_2.set_property("ll-lib-file", tracker_ll_lib_file) + if key == "ll-config-file": + tracker_ll_config_file = tracker_config.get("tracker", key) + self.tracker_2.set_property("ll-config-file", tracker_ll_config_file) + if key == "enable-batch-process": + tracker_enable_batch_process = tracker_config.getint("tracker", key) + self.tracker_2.set_property( + "enable_batch_process", tracker_enable_batch_process + ) + if key == "enable-past-frame": + tracker_enable_past_frame = tracker_config.getint("tracker", key) + self.tracker_2.set_property( + "enable_past_frame", tracker_enable_past_frame + ) + + # ---------------------- # PIPELINE DESCRIPTION - #---------------------- + # ---------------------- # It appears that you need to have the nvstreammux components on each processing pipeline in order for the # nvtracker to work properly. 
# @@ -399,9 +468,11 @@ def __init__(self, dump_dot_file=False): # filesrc -> demux -> queue -> h264parser -> h264decoder -> tee -| # |-> queue_2 -> streammux_2 -> pipeline-2 # - # pipeline-1 -> primary_inference_1 -> tracker_1 -> secondary_inference_1_1 -> secondary_inference_2_1 -> secondary_inference_3_1 -> videoconverter_1 -> osd_1 -> queue -> videosink_1 + # pipeline-1 -> primary_inference_1 -> tracker_1 -> secondary_inference_1_1 -> secondary_inference_2_1 + # -> secondary_inference_3_1 -> videoconverter_1 -> osd_1 -> queue -> videosink_1 # - # pipeline-2 -> primary_inference_2 -> tracker_2 -> secondary_inference_1_2 -> secondary_inference_2_2 -> secondary_inference_3_2 -> videoconverter_2 -> osd_2 -> queue -> videosink_2 + # pipeline-2 -> primary_inference_2 -> tracker_2 -> secondary_inference_1_2 -> secondary_inference_2_2 + # -> secondary_inference_3_2 -> videoconverter_2 -> osd_2 -> queue -> videosink_2 # --- LINK IMAGE PROCESSING --- # Link source to demuxer @@ -410,10 +481,12 @@ def __init__(self, dump_dot_file=False): # Connect demux to the pad-added signal, used to link demuxer to queue dynamically demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() demuxer_pad_added.register("video_", self.video_queue, "sink") - assert self.demuxer.connect("pad-added", demuxer_pad_added) == True + assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None # Link video pipeline - gsthelpers.link_elements([self.video_queue, self.h264_parser, self.h264_decoder, self.tee]) + gsthelpers.link_elements( + [self.video_queue, self.h264_parser, self.h264_decoder, self.tee] + ) # Link tee to queue_1 source = self.tee.get_request_pad("src_0") @@ -444,28 +517,36 @@ def __init__(self, dump_dot_file=False): assert source.link(sink) == Gst.PadLinkReturn.OK # --- LINK FIRST IMAGE PROCESSING PIPELINE --- - gsthelpers.link_elements([self.stream_muxer_1, - self.primary_inference_1, - self.tracker_1, - self.secondary1_inference_1, - self.secondary2_inference_1, - self.secondary3_inference_1, - self.video_converter_1, - self.osd_1, - self.videosink_queue_1, - self.video_sink_1]) + gsthelpers.link_elements( + [ + self.stream_muxer_1, + self.primary_inference_1, + self.tracker_1, + self.secondary1_inference_1, + self.secondary2_inference_1, + self.secondary3_inference_1, + self.video_converter_1, + self.osd_1, + self.videosink_queue_1, + self.video_sink_1, + ] + ) # --- LINK SECOND IMAGE PROCESSING PIPELINE --- - gsthelpers.link_elements([self.stream_muxer_2, - self.primary_inference_2, - self.tracker_2, - self.secondary1_inference_2, - self.secondary2_inference_2, - self.secondary3_inference_2, - self.video_converter_2, - self.osd_2, - self.videosink_queue_2, - self.video_sink_2]) + gsthelpers.link_elements( + [ + self.stream_muxer_2, + self.primary_inference_2, + self.tracker_2, + self.secondary1_inference_2, + self.secondary2_inference_2, + self.secondary3_inference_2, + self.video_converter_2, + self.osd_2, + self.videosink_queue_2, + self.video_sink_2, + ] + ) # --- Meta-data output --- # Add a probe to the sink pad of the osd-element in order to draw/print meta-data to the canvas @@ -478,7 +559,9 @@ def __init__(self, dump_dot_file=False): osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) if dump_dot_file: - Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, "gst-tracking-parallel") + Gst.debug_bin_to_dot_file( + self.pipeline, Gst.DebugGraphDetails.ALL, "gst-tracking-parallel" + ) def play(self, input_file: str): """ @@ -547,15 +630,20 @@ def 
stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-d", "--dump_dot_file", action="store_true", help="dump a dot file of the pipeline") + argParser.add_argument( + "-d", + "--dump_dot_file", + action="store_true", + help="dump a dot file of the pipeline", + ) args = argParser.parse_args() if args.dump_dot_file: os.environ["GST_DEBUG_DUMP_DOT_DIR"] = str(os.getcwd()) - os.putenv('GST_DEBUG_DUMP_DIR_DIR', str(os.getcwd())) + os.putenv("GST_DEBUG_DUMP_DIR_DIR", str(os.getcwd())) player = Player(args.dump_dot_file) try: diff --git a/deepstream-examples/deepstream-tracking/README.md b/deepstream-examples/deepstream-tracking/README.md index 118b4a1..db960a1 100644 --- a/deepstream-examples/deepstream-tracking/README.md +++ b/deepstream-examples/deepstream-tracking/README.md @@ -27,7 +27,7 @@ There are two versions: * [gst-tracking.py](gst-tracking.py) * This version draws bounding box and object information using deepstream's native way. * [gst-tracking-v2.py](gst-tracking-v2.py) - * This version draws the information so that bounding- and text boxes for smaller objects are drawn first. + * This version draws the information so that bounding- and text boxes for smaller objects are drawn first. Everything else being the same, smaller objects tend to be further away from the camera. Also bounding bbox colors are different for each object type. ## Requirements diff --git a/deepstream-examples/deepstream-tracking/gst-tracking-v2.py b/deepstream-examples/deepstream-tracking/gst-tracking-v2.py index 9cd609d..573b6cc 100644 --- a/deepstream-examples/deepstream-tracking/gst-tracking-v2.py +++ b/deepstream-examples/deepstream-tracking/gst-tracking-v2.py @@ -29,12 +29,11 @@ import sys import signal import pyds -from helpers import * - +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 PGIE_CLASS_ID_VEHICLE = 0 PGIE_CLASS_ID_BICYCLE = 1 @@ -42,8 +41,11 @@ PGIE_CLASS_ID_ROADSIGN = 3 past_tracking_meta = [0] -MetaObject = namedtuple("MetaObject", ["left", "top", "height", "width", "area", "bottom", "id", "text", "class_id"]) -ColorObject = namedtuple('ColorObject', ['red', 'green', 'blue', 'alpha']) +MetaObject = namedtuple( + "MetaObject", + ["left", "top", "height", "width", "area", "bottom", "id", "text", "class_id"], +) +ColorObject = namedtuple("ColorObject", ["red", "green", "blue", "alpha"]) ColorList = { PGIE_CLASS_ID_VEHICLE: ColorObject(red=1.0, green=0.0, blue=0.0, alpha=1.0), @@ -55,7 +57,12 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): frame_number = 0 - obj_counter = {PGIE_CLASS_ID_VEHICLE: 0, PGIE_CLASS_ID_PERSON: 0, PGIE_CLASS_ID_BICYCLE: 0, PGIE_CLASS_ID_ROADSIGN: 0} + obj_counter = { + PGIE_CLASS_ID_VEHICLE: 0, + PGIE_CLASS_ID_PERSON: 0, + PGIE_CLASS_ID_BICYCLE: 0, + PGIE_CLASS_ID_ROADSIGN: 0, + } num_rects = 0 gst_buffer = info.get_buffer() if not gst_buffer: @@ -89,8 +96,10 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): top=obj_meta.tracker_bbox_info.org_bbox_coords.top, height=obj_meta.tracker_bbox_info.org_bbox_coords.height, width=obj_meta.tracker_bbox_info.org_bbox_coords.width, - area=obj_meta.tracker_bbox_info.org_bbox_coords.height * obj_meta.tracker_bbox_info.org_bbox_coords.width, - 
bottom=obj_meta.tracker_bbox_info.org_bbox_coords.top + obj_meta.tracker_bbox_info.org_bbox_coords.height, + area=obj_meta.tracker_bbox_info.org_bbox_coords.height + * obj_meta.tracker_bbox_info.org_bbox_coords.width, + bottom=obj_meta.tracker_bbox_info.org_bbox_coords.top + + obj_meta.tracker_bbox_info.org_bbox_coords.height, id=obj_meta.object_id, text=f"ID: {obj_meta.object_id:04d}, Class: {pyds.get_string(obj_meta.text_params.display_text)}", class_id=obj_meta.class_id, @@ -145,14 +154,24 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): if x < 0 or y < 0: continue - display_meta.text_params[display_meta.num_labels].display_text = meta_list_sorted[idx].text + display_meta.text_params[ + display_meta.num_labels + ].display_text = meta_list_sorted[idx].text display_meta.text_params[display_meta.num_labels].x_offset = x display_meta.text_params[display_meta.num_labels].y_offset = y - display_meta.text_params[display_meta.num_labels].font_params.font_name = "Serif" - display_meta.text_params[display_meta.num_labels].font_params.font_size = 10 - display_meta.text_params[display_meta.num_labels].font_params.font_color.set(1.0, 1.0, 1.0, 1.0) + display_meta.text_params[ + display_meta.num_labels + ].font_params.font_name = "Serif" + display_meta.text_params[ + display_meta.num_labels + ].font_params.font_size = 10 + display_meta.text_params[ + display_meta.num_labels + ].font_params.font_color.set(1.0, 1.0, 1.0, 1.0) display_meta.text_params[display_meta.num_labels].set_bg_clr = 1 - display_meta.text_params[display_meta.num_labels].text_bg_clr.set(0.45, 0.20, 0.50, 0.75) + display_meta.text_params[display_meta.num_labels].text_bg_clr.set( + 0.45, 0.20, 0.50, 0.75 + ) display_meta.num_labels += 1 display_meta.num_rects = end_idx - start_idx @@ -188,9 +207,15 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): user_meta = pyds.NvDsUserMeta.cast(l_user.data) except StopIteration: break - if user_meta and user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META: + if ( + user_meta + and user_meta.base_meta.meta_type + == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META + ): try: - pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data) + pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast( + user_meta.user_meta_data + ) except StopIteration: break for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch): @@ -246,12 +271,22 @@ def __init__(self): self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser") self.h264_decoder = gsthelpers.create_element("nvv4l2decoder", "h264-decoder") self.stream_muxer = gsthelpers.create_element("nvstreammux", "stream-muxer") - self.primary_inference = gsthelpers.create_element("nvinfer", "primary-inference") + self.primary_inference = gsthelpers.create_element( + "nvinfer", "primary-inference" + ) self.tracker = gsthelpers.create_element("nvtracker", "tracker") - self.secondary1_inference = gsthelpers.create_element("nvinfer", "secondary1-inference") - self.secondary2_inference = gsthelpers.create_element("nvinfer", "secondary2-inference") - self.secondary3_inference = gsthelpers.create_element("nvinfer", "secondary3-inference") - self.video_converter = gsthelpers.create_element("nvvideoconvert", "video-converter") + self.secondary1_inference = gsthelpers.create_element( + "nvinfer", "secondary1-inference" + ) + self.secondary2_inference = gsthelpers.create_element( + "nvinfer", "secondary2-inference" + ) + self.secondary3_inference = gsthelpers.create_element( + "nvinfer", 
"secondary3-inference" + ) + self.video_converter = gsthelpers.create_element( + "nvvideoconvert", "video-converter" + ) self.osd = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw") self.tee = gsthelpers.create_element("tee", "tee") # Video sink branch @@ -259,10 +294,18 @@ def __init__(self): self.video_sink = gsthelpers.create_element("nveglglessink", "nvvideo-renderer") # File sink branch self.filesink_queue = gsthelpers.create_element("queue", "filesink-queue") - self.file_sink_converter = gsthelpers.create_element("nvvideoconvert", "file-sink-videoconverter") - self.file_sink_encoder = gsthelpers.create_element("nvv4l2h264enc", "file-sink-encoder") - self.file_sink_parser = gsthelpers.create_element("h264parse", "file-sink-parser") - self.file_sink_muxer = gsthelpers.create_element("matroskamux", "file-sink-muxer") + self.file_sink_converter = gsthelpers.create_element( + "nvvideoconvert", "file-sink-videoconverter" + ) + self.file_sink_encoder = gsthelpers.create_element( + "nvv4l2h264enc", "file-sink-encoder" + ) + self.file_sink_parser = gsthelpers.create_element( + "h264parse", "file-sink-parser" + ) + self.file_sink_muxer = gsthelpers.create_element( + "matroskamux", "file-sink-muxer" + ) self.file_sink = gsthelpers.create_element("filesink", "file-sink") # Add elements to the pipeline @@ -301,38 +344,50 @@ def __init__(self): self.file_sink_encoder.set_property("profile", 4) # Set properties for the inference engines - self.primary_inference.set_property("config-file-path", "dstest2_pgie_config.txt") - self.secondary1_inference.set_property("config-file-path", "dstest2_sgie1_config.txt") - self.secondary2_inference.set_property("config-file-path", "dstest2_sgie2_config.txt") - self.secondary3_inference.set_property("config-file-path", "dstest2_sgie3_config.txt") + self.primary_inference.set_property( + "config-file-path", "dstest2_pgie_config.txt" + ) + self.secondary1_inference.set_property( + "config-file-path", "dstest2_sgie1_config.txt" + ) + self.secondary2_inference.set_property( + "config-file-path", "dstest2_sgie2_config.txt" + ) + self.secondary3_inference.set_property( + "config-file-path", "dstest2_sgie3_config.txt" + ) # Set properties for the tracker tracker_config = configparser.ConfigParser() tracker_config.read("dstest2_tracker_config.txt") tracker_config.sections() - for key in tracker_config['tracker']: - if key == 'tracker-width': - tracker_width = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-width', tracker_width) - if key == 'tracker-height': - tracker_height = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-height', tracker_height) - if key == 'gpu-id': - tracker_gpu_id = tracker_config.getint('tracker', key) - self.tracker.set_property('gpu_id', tracker_gpu_id) - if key == 'll-lib-file': - tracker_ll_lib_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-lib-file', tracker_ll_lib_file) - if key == 'll-config-file': - tracker_ll_config_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-config-file', tracker_ll_config_file) - if key == 'enable-batch-process': - tracker_enable_batch_process = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_batch_process', tracker_enable_batch_process) - if key == 'enable-past-frame': - tracker_enable_past_frame = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_past_frame', tracker_enable_past_frame) + for key in tracker_config["tracker"]: + if key == 
"tracker-width": + tracker_width = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-width", tracker_width) + if key == "tracker-height": + tracker_height = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-height", tracker_height) + if key == "gpu-id": + tracker_gpu_id = tracker_config.getint("tracker", key) + self.tracker.set_property("gpu_id", tracker_gpu_id) + if key == "ll-lib-file": + tracker_ll_lib_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-lib-file", tracker_ll_lib_file) + if key == "ll-config-file": + tracker_ll_config_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-config-file", tracker_ll_config_file) + if key == "enable-batch-process": + tracker_enable_batch_process = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_batch_process", tracker_enable_batch_process + ) + if key == "enable-past-frame": + tracker_enable_past_frame = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_past_frame", tracker_enable_past_frame + ) # --- LINK IMAGE PROCESSING --- # Link video input and inference as follows: @@ -350,10 +405,12 @@ def __init__(self): demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() demuxer_pad_added.register("video_", self.video_queue, "sink") - assert self.demuxer.connect("pad-added", demuxer_pad_added) == True + assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None # Link video pipeline - gsthelpers.link_elements([self.video_queue, self.h264_parser, self.h264_decoder]) + gsthelpers.link_elements( + [self.video_queue, self.h264_parser, self.h264_decoder] + ) # Link decoder to streammux source = self.h264_decoder.get_static_pad("src") @@ -363,15 +420,19 @@ def __init__(self): assert source.link(sink) == Gst.PadLinkReturn.OK # Link inference, tracker and visualization - gsthelpers.link_elements([self.stream_muxer, - self.primary_inference, - self.tracker, - self.secondary1_inference, - self.secondary2_inference, - self.secondary3_inference, - self.video_converter, - self.osd, - self.tee]) + gsthelpers.link_elements( + [ + self.stream_muxer, + self.primary_inference, + self.tracker, + self.secondary1_inference, + self.secondary2_inference, + self.secondary3_inference, + self.video_converter, + self.osd, + self.tee, + ] + ) # --- LINK OUTPUT BRANCHES --- # We have two outputs, videosink and a filesink, as follows: @@ -397,10 +458,14 @@ def __init__(self): assert sink is not None assert src.link(sink) == Gst.PadLinkReturn.OK - gsthelpers.link_elements([self.filesink_queue, - self.file_sink_converter, - self.file_sink_encoder, - self.file_sink_parser]) + gsthelpers.link_elements( + [ + self.filesink_queue, + self.file_sink_converter, + self.file_sink_encoder, + self.file_sink_parser, + ] + ) src = self.file_sink_parser.get_static_pad("src") assert src is not None @@ -487,10 +552,12 @@ def stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-o", "--output_file", help="output file path", default="output.mp4") + argParser.add_argument( + "-o", "--output_file", help="output file path", default="output.mp4" + ) args = argParser.parse_args() player = Player() diff --git a/deepstream-examples/deepstream-tracking/gst-tracking.py b/deepstream-examples/deepstream-tracking/gst-tracking.py index 3e1e54f..b5c70d6 100644 --- 
a/deepstream-examples/deepstream-tracking/gst-tracking.py +++ b/deepstream-examples/deepstream-tracking/gst-tracking.py @@ -22,12 +22,11 @@ import sys import signal import pyds -from helpers import * - +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 PGIE_CLASS_ID_VEHICLE = 0 PGIE_CLASS_ID_BICYCLE = 1 @@ -45,7 +44,7 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): PGIE_CLASS_ID_VEHICLE: 0, PGIE_CLASS_ID_PERSON: 0, PGIE_CLASS_ID_BICYCLE: 0, - PGIE_CLASS_ID_ROADSIGN: 0 + PGIE_CLASS_ID_ROADSIGN: 0, } num_rects = 0 gst_buffer = info.get_buffer() @@ -95,9 +94,12 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): # memory will not be claimed by the garbage collector. # Reading the display_text field here will return the C address of the # allocated string. Use pyds.get_string() to get the string content. - py_nvosd_text_params.display_text = \ - "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format( - frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON]) + py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format( + frame_number, + num_rects, + obj_counter[PGIE_CLASS_ID_VEHICLE], + obj_counter[PGIE_CLASS_ID_PERSON], + ) # Now set the offsets where the string should appear py_nvosd_text_params.x_offset = 10 @@ -134,14 +136,20 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): user_meta = pyds.NvDsUserMeta.cast(l_user.data) except StopIteration: break - if user_meta and user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META: + if ( + user_meta + and user_meta.base_meta.meta_type + == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META + ): try: # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch # The casting is done by pyds.NvDsPastFrameObjBatch.cast() # The casting also keeps ownership of the underlying memory # in the C code, so the Python garbage collector will leave # it alone - pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data) + pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast( + user_meta.user_meta_data + ) except StopIteration: break for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch): @@ -153,13 +161,13 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): print("classId=", pastframeobj.classId) print("objLabel=", pastframeobj.objLabel) for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj): - print('frameNum:', objlist.frameNum) - print('tBbox.left:', objlist.tBbox.left) - print('tBbox.width:', objlist.tBbox.width) - print('tBbox.top:', objlist.tBbox.top) - print('tBbox.right:', objlist.tBbox.height) - print('confidence:', objlist.confidence) - print('age:', objlist.age) + print("frameNum:", objlist.frameNum) + print("tBbox.left:", objlist.tBbox.left) + print("tBbox.width:", objlist.tBbox.width) + print("tBbox.top:", objlist.tBbox.top) + print("tBbox.right:", objlist.tBbox.height) + print("confidence:", objlist.confidence) + print("age:", objlist.age) try: l_user = l_user.next except StopIteration: @@ -196,12 +204,22 @@ def __init__(self): self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser") self.h264_decoder = gsthelpers.create_element("nvv4l2decoder", "h264-decoder") self.stream_muxer = gsthelpers.create_element("nvstreammux", "stream-muxer") - self.primary_inference = 
gsthelpers.create_element("nvinfer", "primary-inference") + self.primary_inference = gsthelpers.create_element( + "nvinfer", "primary-inference" + ) self.tracker = gsthelpers.create_element("nvtracker", "tracker") - self.secondary1_inference = gsthelpers.create_element("nvinfer", "secondary1-inference") - self.secondary2_inference = gsthelpers.create_element("nvinfer", "secondary2-inference") - self.secondary3_inference = gsthelpers.create_element("nvinfer", "secondary3-inference") - self.video_converter = gsthelpers.create_element("nvvideoconvert", "video-converter") + self.secondary1_inference = gsthelpers.create_element( + "nvinfer", "secondary1-inference" + ) + self.secondary2_inference = gsthelpers.create_element( + "nvinfer", "secondary2-inference" + ) + self.secondary3_inference = gsthelpers.create_element( + "nvinfer", "secondary3-inference" + ) + self.video_converter = gsthelpers.create_element( + "nvvideoconvert", "video-converter" + ) self.osd = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw") self.tee = gsthelpers.create_element("tee", "tee") # Video sink branch @@ -209,10 +227,18 @@ def __init__(self): self.video_sink = gsthelpers.create_element("nveglglessink", "nvvideo-renderer") # File sink branch self.filesink_queue = gsthelpers.create_element("queue", "filesink-queue") - self.file_sink_converter = gsthelpers.create_element("nvvideoconvert", "file-sink-videoconverter") - self.file_sink_encoder = gsthelpers.create_element("nvv4l2h264enc", "file-sink-encoder") - self.file_sink_parser = gsthelpers.create_element("h264parse", "file-sink-parser") - self.file_sink_muxer = gsthelpers.create_element("matroskamux", "file-sink-muxer") + self.file_sink_converter = gsthelpers.create_element( + "nvvideoconvert", "file-sink-videoconverter" + ) + self.file_sink_encoder = gsthelpers.create_element( + "nvv4l2h264enc", "file-sink-encoder" + ) + self.file_sink_parser = gsthelpers.create_element( + "h264parse", "file-sink-parser" + ) + self.file_sink_muxer = gsthelpers.create_element( + "matroskamux", "file-sink-muxer" + ) self.file_sink = gsthelpers.create_element("filesink", "file-sink") # Add elements to the pipeline @@ -251,38 +277,50 @@ def __init__(self): self.file_sink_encoder.set_property("profile", 4) # Set properties for the inference engines - self.primary_inference.set_property("config-file-path", "dstest2_pgie_config.txt") - self.secondary1_inference.set_property("config-file-path", "dstest2_sgie1_config.txt") - self.secondary2_inference.set_property("config-file-path", "dstest2_sgie2_config.txt") - self.secondary3_inference.set_property("config-file-path", "dstest2_sgie3_config.txt") + self.primary_inference.set_property( + "config-file-path", "dstest2_pgie_config.txt" + ) + self.secondary1_inference.set_property( + "config-file-path", "dstest2_sgie1_config.txt" + ) + self.secondary2_inference.set_property( + "config-file-path", "dstest2_sgie2_config.txt" + ) + self.secondary3_inference.set_property( + "config-file-path", "dstest2_sgie3_config.txt" + ) # Set properties for the tracker tracker_config = configparser.ConfigParser() tracker_config.read("dstest2_tracker_config.txt") tracker_config.sections() - for key in tracker_config['tracker']: - if key == 'tracker-width': - tracker_width = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-width', tracker_width) - if key == 'tracker-height': - tracker_height = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-height', tracker_height) - if key == 'gpu-id': - 
tracker_gpu_id = tracker_config.getint('tracker', key) - self.tracker.set_property('gpu_id', tracker_gpu_id) - if key == 'll-lib-file': - tracker_ll_lib_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-lib-file', tracker_ll_lib_file) - if key == 'll-config-file': - tracker_ll_config_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-config-file', tracker_ll_config_file) - if key == 'enable-batch-process': - tracker_enable_batch_process = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_batch_process', tracker_enable_batch_process) - if key == 'enable-past-frame': - tracker_enable_past_frame = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_past_frame', tracker_enable_past_frame) + for key in tracker_config["tracker"]: + if key == "tracker-width": + tracker_width = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-width", tracker_width) + if key == "tracker-height": + tracker_height = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-height", tracker_height) + if key == "gpu-id": + tracker_gpu_id = tracker_config.getint("tracker", key) + self.tracker.set_property("gpu_id", tracker_gpu_id) + if key == "ll-lib-file": + tracker_ll_lib_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-lib-file", tracker_ll_lib_file) + if key == "ll-config-file": + tracker_ll_config_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-config-file", tracker_ll_config_file) + if key == "enable-batch-process": + tracker_enable_batch_process = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_batch_process", tracker_enable_batch_process + ) + if key == "enable-past-frame": + tracker_enable_past_frame = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_past_frame", tracker_enable_past_frame + ) # --- LINK IMAGE PROCESSING --- # Link video input and inference as follows: @@ -300,10 +338,12 @@ def __init__(self): demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() demuxer_pad_added.register("video_", self.video_queue, "sink") - assert self.demuxer.connect("pad-added", demuxer_pad_added) == True + assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None # Link video pipeline - gsthelpers.link_elements([self.video_queue, self.h264_parser, self.h264_decoder]) + gsthelpers.link_elements( + [self.video_queue, self.h264_parser, self.h264_decoder] + ) # Link decoder to streammux source = self.h264_decoder.get_static_pad("src") @@ -313,15 +353,19 @@ def __init__(self): assert source.link(sink) == Gst.PadLinkReturn.OK # Link inference, tracker and visualization - gsthelpers.link_elements([self.stream_muxer, - self.primary_inference, - self.tracker, - self.secondary1_inference, - self.secondary2_inference, - self.secondary3_inference, - self.video_converter, - self.osd, - self.tee]) + gsthelpers.link_elements( + [ + self.stream_muxer, + self.primary_inference, + self.tracker, + self.secondary1_inference, + self.secondary2_inference, + self.secondary3_inference, + self.video_converter, + self.osd, + self.tee, + ] + ) # --- LINK OUTPUT BRANCHES --- # We have two outputs, videosink and a filesink, as follows: @@ -347,10 +391,14 @@ def __init__(self): assert sink is not None assert src.link(sink) == Gst.PadLinkReturn.OK - gsthelpers.link_elements([self.filesink_queue, - self.file_sink_converter, - self.file_sink_encoder, - self.file_sink_parser]) + gsthelpers.link_elements( 
+ [ + self.filesink_queue, + self.file_sink_converter, + self.file_sink_encoder, + self.file_sink_parser, + ] + ) src = self.file_sink_parser.get_static_pad("src") assert src is not None @@ -437,10 +485,12 @@ def stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-o", "--output_file", help="output file path", default="output.mp4") + argParser.add_argument( + "-o", "--output_file", help="output file path", default="output.mp4" + ) args = argParser.parse_args() player = Player() diff --git a/deepstream-examples/deepstream-tracking/tracker_config.yml b/deepstream-examples/deepstream-tracking/tracker_config.yml index 6af8f06..3a6f550 100644 --- a/deepstream-examples/deepstream-tracking/tracker_config.yml +++ b/deepstream-examples/deepstream-tracking/tracker_config.yml @@ -62,7 +62,7 @@ NvDCF: minMatchingScore4SizeSimilarity: 0.5 # Min bbox size similarity score minMatchingScore4Iou: 0.1 # Min IOU score minMatchingScore4VisualSimilarity: 0.2 # Min visual similarity score - minTrackingConfidenceDuringInactive: 1.0 # Min tracking confidence during INACTIVE period. If tracking confidence is higher than this, then tracker will still output results until next detection + minTrackingConfidenceDuringInactive: 1.0 # Min tracking confidence during INACTIVE period. If tracking confidence is higher than this, then tracker will still output results until next detection # [Data Association] Weights for each matching score term matchingScoreWeight4VisualSimilarity: 0.8 # Weight for the visual similarity (in terms of correlation response ratio) diff --git a/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking-v2.py b/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking-v2.py index 6876ce8..63d1307 100644 --- a/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking-v2.py +++ b/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking-v2.py @@ -29,12 +29,11 @@ import sys import signal import pyds -from helpers import * - +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 PGIE_CLASS_ID_VEHICLE = 0 PGIE_CLASS_ID_BICYCLE = 1 @@ -42,8 +41,10 @@ PGIE_CLASS_ID_ROADSIGN = 3 past_tracking_meta = [0] -MetaObject = namedtuple('MetaObject', ['left', 'top', 'height', 'width', 'area', 'id', 'text', 'class_id']) -ColorObject = namedtuple('ColorObject', ['red', 'green', 'blue', 'alpha']) +MetaObject = namedtuple( + "MetaObject", ["left", "top", "height", "width", "area", "id", "text", "class_id"] +) +ColorObject = namedtuple("ColorObject", ["red", "green", "blue", "alpha"]) ColorList = { PGIE_CLASS_ID_VEHICLE: ColorObject(red=1.0, green=0.0, blue=0.0, alpha=1.0), @@ -60,7 +61,7 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): PGIE_CLASS_ID_VEHICLE: 0, PGIE_CLASS_ID_PERSON: 0, PGIE_CLASS_ID_BICYCLE: 0, - PGIE_CLASS_ID_ROADSIGN: 0 + PGIE_CLASS_ID_ROADSIGN: 0, } num_rects = 0 gst_buffer = info.get_buffer() @@ -103,10 +104,10 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): height=obj_meta.tracker_bbox_info.org_bbox_coords.height, width=obj_meta.tracker_bbox_info.org_bbox_coords.width, area=obj_meta.tracker_bbox_info.org_bbox_coords.height - * obj_meta.tracker_bbox_info.org_bbox_coords.width, + * 
obj_meta.tracker_bbox_info.org_bbox_coords.width, id=obj_meta.object_id, text=pyds.get_string(obj_meta.text_params.display_text), - class_id=obj_meta.class_id + class_id=obj_meta.class_id, ) meta_list.append(obj) @@ -123,7 +124,7 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): break # Sort the meta_list so that objects further back are drawn first - meta_list_sorted = sorted(meta_list, key=attrgetter('area')) + meta_list_sorted = sorted(meta_list, key=attrgetter("area")) # Acquiring a display meta object. The memory ownership remains in # the C code so downstream plugins can still access it. Otherwise @@ -151,12 +152,14 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): # Display information regarding number of objects detected idx = display_meta.num_labels - 1 - display_meta.text_params[idx].display_text = f"Frame number={frame_number}, " \ - f"nr objects={num_rects}, " \ - f"vehicles={obj_counter[PGIE_CLASS_ID_VEHICLE]}, " \ - f"bicycles={obj_counter[PGIE_CLASS_ID_BICYCLE]}, " \ - f"persons={obj_counter[PGIE_CLASS_ID_PERSON]}, " \ - f"roadsigns={obj_counter[PGIE_CLASS_ID_ROADSIGN]}" + display_meta.text_params[idx].display_text = ( + f"Frame number={frame_number}, " + f"nr objects={num_rects}, " + f"vehicles={obj_counter[PGIE_CLASS_ID_VEHICLE]}, " + f"bicycles={obj_counter[PGIE_CLASS_ID_BICYCLE]}, " + f"persons={obj_counter[PGIE_CLASS_ID_PERSON]}, " + f"roadsigns={obj_counter[PGIE_CLASS_ID_ROADSIGN]}" + ) display_meta.text_params[idx].x_offset = 10 display_meta.text_params[idx].y_offset = 14 display_meta.text_params[idx].font_params.font_name = "Serif" @@ -205,14 +208,20 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): user_meta = pyds.NvDsUserMeta.cast(l_user.data) except StopIteration: break - if user_meta and user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META: + if ( + user_meta + and user_meta.base_meta.meta_type + == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META + ): try: # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch # The casting is done by pyds.NvDsPastFrameObjBatch.cast() # The casting also keeps ownership of the underlying memory # in the C code, so the Python garbage collector will leave # it alone - pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data) + pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast( + user_meta.user_meta_data + ) except StopIteration: break for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch): @@ -224,13 +233,13 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): print("classId=", pastframeobj.classId) print("objLabel=", pastframeobj.objLabel) for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj): - print('frameNum:', objlist.frameNum) - print('tBbox.left:', objlist.tBbox.left) - print('tBbox.width:', objlist.tBbox.width) - print('tBbox.top:', objlist.tBbox.top) - print('tBbox.right:', objlist.tBbox.height) - print('confidence:', objlist.confidence) - print('age:', objlist.age) + print("frameNum:", objlist.frameNum) + print("tBbox.left:", objlist.tBbox.left) + print("tBbox.width:", objlist.tBbox.width) + print("tBbox.top:", objlist.tBbox.top) + print("tBbox.right:", objlist.tBbox.height) + print("confidence:", objlist.confidence) + print("age:", objlist.age) try: l_user = l_user.next except StopIteration: @@ -267,12 +276,22 @@ def __init__(self): self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser") self.h264_decoder = gsthelpers.create_element("nvv4l2decoder", "h264-decoder") self.stream_muxer = 
gsthelpers.create_element("nvstreammux", "stream-muxer") - self.primary_inference = gsthelpers.create_element("nvinferserver", "primary-inference") + self.primary_inference = gsthelpers.create_element( + "nvinferserver", "primary-inference" + ) self.tracker = gsthelpers.create_element("nvtracker", "tracker") - self.secondary1_inference = gsthelpers.create_element("nvinferserver", "secondary1-inference") - self.secondary2_inference = gsthelpers.create_element("nvinferserver", "secondary2-inference") - self.secondary3_inference = gsthelpers.create_element("nvinferserver", "secondary3-inference") - self.video_converter = gsthelpers.create_element("nvvideoconvert", "video-converter") + self.secondary1_inference = gsthelpers.create_element( + "nvinferserver", "secondary1-inference" + ) + self.secondary2_inference = gsthelpers.create_element( + "nvinferserver", "secondary2-inference" + ) + self.secondary3_inference = gsthelpers.create_element( + "nvinferserver", "secondary3-inference" + ) + self.video_converter = gsthelpers.create_element( + "nvvideoconvert", "video-converter" + ) self.osd = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw") self.tee = gsthelpers.create_element("tee", "tee") # Video sink branch @@ -280,10 +299,18 @@ def __init__(self): self.video_sink = gsthelpers.create_element("nveglglessink", "nvvideo-renderer") # File sink branch self.filesink_queue = gsthelpers.create_element("queue", "filesink-queue") - self.file_sink_converter = gsthelpers.create_element("nvvideoconvert", "file-sink-videoconverter") - self.file_sink_encoder = gsthelpers.create_element("x264enc", "file-sink-encoder") - self.file_sink_parser = gsthelpers.create_element("h264parse", "file-sink-parser") - self.file_sink_muxer = gsthelpers.create_element("matroskamux", "file-sink-muxer") + self.file_sink_converter = gsthelpers.create_element( + "nvvideoconvert", "file-sink-videoconverter" + ) + self.file_sink_encoder = gsthelpers.create_element( + "x264enc", "file-sink-encoder" + ) + self.file_sink_parser = gsthelpers.create_element( + "h264parse", "file-sink-parser" + ) + self.file_sink_muxer = gsthelpers.create_element( + "matroskamux", "file-sink-muxer" + ) self.file_sink = gsthelpers.create_element("filesink", "file-sink") # Add elements to the pipeline @@ -324,38 +351,54 @@ def __init__(self): self.file_sink.set_property("async", False) # Set properties for the inference engines - self.primary_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt") - self.secondary1_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carcolor.txt") - self.secondary2_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carmake.txt") - self.secondary3_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_vehicletypes.txt") + self.primary_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt", + ) + self.secondary1_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carcolor.txt", + ) + 
self.secondary2_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carmake.txt", + ) + self.secondary3_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_vehicletypes.txt", + ) # Set properties for the tracker tracker_config = configparser.ConfigParser() tracker_config.read("dstest2_tracker_config.txt") tracker_config.sections() - for key in tracker_config['tracker']: - if key == 'tracker-width': - tracker_width = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-width', tracker_width) - if key == 'tracker-height': - tracker_height = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-height', tracker_height) - if key == 'gpu-id': - tracker_gpu_id = tracker_config.getint('tracker', key) - self.tracker.set_property('gpu_id', tracker_gpu_id) - if key == 'll-lib-file': - tracker_ll_lib_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-lib-file', tracker_ll_lib_file) - if key == 'll-config-file': - tracker_ll_config_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-config-file', tracker_ll_config_file) - if key == 'enable-batch-process': - tracker_enable_batch_process = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_batch_process', tracker_enable_batch_process) - if key == 'enable-past-frame': - tracker_enable_past_frame = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_past_frame', tracker_enable_past_frame) + for key in tracker_config["tracker"]: + if key == "tracker-width": + tracker_width = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-width", tracker_width) + if key == "tracker-height": + tracker_height = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-height", tracker_height) + if key == "gpu-id": + tracker_gpu_id = tracker_config.getint("tracker", key) + self.tracker.set_property("gpu_id", tracker_gpu_id) + if key == "ll-lib-file": + tracker_ll_lib_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-lib-file", tracker_ll_lib_file) + if key == "ll-config-file": + tracker_ll_config_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-config-file", tracker_ll_config_file) + if key == "enable-batch-process": + tracker_enable_batch_process = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_batch_process", tracker_enable_batch_process + ) + if key == "enable-past-frame": + tracker_enable_past_frame = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_past_frame", tracker_enable_past_frame + ) # --- LINK IMAGE PROCESSING --- # Link video input and inference as follows: @@ -373,10 +416,12 @@ def __init__(self): demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() demuxer_pad_added.register("video_", self.video_queue, "sink") - assert self.demuxer.connect("pad-added", demuxer_pad_added) == True + assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None # Link video pipeline - gsthelpers.link_elements([self.video_queue, self.h264_parser, self.h264_decoder]) + gsthelpers.link_elements( + [self.video_queue, self.h264_parser, self.h264_decoder] + ) # Link decoder to streammux source = self.h264_decoder.get_static_pad("src") @@ -386,15 +431,19 @@ def __init__(self): assert 
source.link(sink) == Gst.PadLinkReturn.OK # Link inference, tracker and visualization - gsthelpers.link_elements([self.stream_muxer, - self.primary_inference, - self.tracker, - self.secondary1_inference, - self.secondary2_inference, - self.secondary3_inference, - self.video_converter, - self.osd, - self.tee]) + gsthelpers.link_elements( + [ + self.stream_muxer, + self.primary_inference, + self.tracker, + self.secondary1_inference, + self.secondary2_inference, + self.secondary3_inference, + self.video_converter, + self.osd, + self.tee, + ] + ) # --- LINK OUTPUT BRANCHES --- # We have two outputs, videosink and a filesink, as follows: @@ -420,10 +469,14 @@ def __init__(self): assert sink is not None assert src.link(sink) == Gst.PadLinkReturn.OK - gsthelpers.link_elements([self.filesink_queue, - self.file_sink_converter, - self.file_sink_encoder, - self.file_sink_parser]) + gsthelpers.link_elements( + [ + self.filesink_queue, + self.file_sink_converter, + self.file_sink_encoder, + self.file_sink_parser, + ] + ) src = self.file_sink_parser.get_static_pad("src") assert src is not None @@ -510,10 +563,12 @@ def stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-o", "--output_file", help="output file path", default="output.mp4") + argParser.add_argument( + "-o", "--output_file", help="output file path", default="output.mp4" + ) args = argParser.parse_args() player = Player() diff --git a/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking.py b/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking.py index ae05037..b11bd55 100644 --- a/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking.py +++ b/deepstream-examples/deepstream-triton-tracking/gst-triton-tracking.py @@ -22,12 +22,11 @@ import sys import signal import pyds -from helpers import * - +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 PGIE_CLASS_ID_VEHICLE = 0 PGIE_CLASS_ID_BICYCLE = 1 @@ -45,7 +44,7 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): PGIE_CLASS_ID_VEHICLE: 0, PGIE_CLASS_ID_PERSON: 0, PGIE_CLASS_ID_BICYCLE: 0, - PGIE_CLASS_ID_ROADSIGN: 0 + PGIE_CLASS_ID_ROADSIGN: 0, } num_rects = 0 gst_buffer = info.get_buffer() @@ -95,9 +94,12 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): # memory will not be claimed by the garbage collector. # Reading the display_text field here will return the C address of the # allocated string. Use pyds.get_string() to get the string content. 
- py_nvosd_text_params.display_text = \ - "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format( - frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON]) + py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format( + frame_number, + num_rects, + obj_counter[PGIE_CLASS_ID_VEHICLE], + obj_counter[PGIE_CLASS_ID_PERSON], + ) # Now set the offsets where the string should appear py_nvosd_text_params.x_offset = 10 @@ -134,14 +136,20 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): user_meta = pyds.NvDsUserMeta.cast(l_user.data) except StopIteration: break - if user_meta and user_meta.base_meta.meta_type == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META: + if ( + user_meta + and user_meta.base_meta.meta_type + == pyds.NvDsMetaType.NVDS_TRACKER_PAST_FRAME_META + ): try: # Note that user_meta.user_meta_data needs a cast to pyds.NvDsPastFrameObjBatch # The casting is done by pyds.NvDsPastFrameObjBatch.cast() # The casting also keeps ownership of the underlying memory # in the C code, so the Python garbage collector will leave # it alone - pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast(user_meta.user_meta_data) + pPastFrameObjBatch = pyds.NvDsPastFrameObjBatch.cast( + user_meta.user_meta_data + ) except StopIteration: break for trackobj in pyds.NvDsPastFrameObjBatch.list(pPastFrameObjBatch): @@ -153,13 +161,13 @@ def osd_sink_pad_buffer_probe(pad, info, u_data): print("classId=", pastframeobj.classId) print("objLabel=", pastframeobj.objLabel) for objlist in pyds.NvDsPastFrameObjList.list(pastframeobj): - print('frameNum:', objlist.frameNum) - print('tBbox.left:', objlist.tBbox.left) - print('tBbox.width:', objlist.tBbox.width) - print('tBbox.top:', objlist.tBbox.top) - print('tBbox.right:', objlist.tBbox.height) - print('confidence:', objlist.confidence) - print('age:', objlist.age) + print("frameNum:", objlist.frameNum) + print("tBbox.left:", objlist.tBbox.left) + print("tBbox.width:", objlist.tBbox.width) + print("tBbox.top:", objlist.tBbox.top) + print("tBbox.right:", objlist.tBbox.height) + print("confidence:", objlist.confidence) + print("age:", objlist.age) try: l_user = l_user.next except StopIteration: @@ -197,12 +205,22 @@ def __init__(self): self.h264_decoder = gsthelpers.create_element("nvv4l2decoder", "h264-decoder") self.decoder_queue = gsthelpers.create_element("queue", "decoder-queue") self.stream_muxer = gsthelpers.create_element("nvstreammux", "stream-muxer") - self.primary_inference = gsthelpers.create_element("nvinferserver", "primary-inference") + self.primary_inference = gsthelpers.create_element( + "nvinferserver", "primary-inference" + ) self.tracker = gsthelpers.create_element("nvtracker", "tracker") - self.secondary1_inference = gsthelpers.create_element("nvinferserver", "secondary1-inference") - self.secondary2_inference = gsthelpers.create_element("nvinferserver", "secondary2-inference") - self.secondary3_inference = gsthelpers.create_element("nvinferserver", "secondary3-inference") - self.video_converter = gsthelpers.create_element("nvvideoconvert", "video-converter") + self.secondary1_inference = gsthelpers.create_element( + "nvinferserver", "secondary1-inference" + ) + self.secondary2_inference = gsthelpers.create_element( + "nvinferserver", "secondary2-inference" + ) + self.secondary3_inference = gsthelpers.create_element( + "nvinferserver", "secondary3-inference" + ) + self.video_converter = gsthelpers.create_element( + "nvvideoconvert", 
"video-converter" + ) self.osd = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw") self.tee = gsthelpers.create_element("tee", "tee") # Video sink branch @@ -210,10 +228,18 @@ def __init__(self): self.video_sink = gsthelpers.create_element("nveglglessink", "nvvideo-renderer") # File sink branch self.filesink_queue = gsthelpers.create_element("queue", "filesink-queue") - self.file_sink_converter = gsthelpers.create_element("nvvideoconvert", "file-sink-videoconverter") - self.file_sink_encoder = gsthelpers.create_element("x264enc", "file-sink-encoder") - self.file_sink_parser = gsthelpers.create_element("h264parse", "file-sink-parser") - self.file_sink_muxer = gsthelpers.create_element("matroskamux", "file-sink-muxer") + self.file_sink_converter = gsthelpers.create_element( + "nvvideoconvert", "file-sink-videoconverter" + ) + self.file_sink_encoder = gsthelpers.create_element( + "x264enc", "file-sink-encoder" + ) + self.file_sink_parser = gsthelpers.create_element( + "h264parse", "file-sink-parser" + ) + self.file_sink_muxer = gsthelpers.create_element( + "matroskamux", "file-sink-muxer" + ) self.file_sink = gsthelpers.create_element("filesink", "file-sink") # Add elements to the pipeline @@ -255,38 +281,54 @@ def __init__(self): self.file_sink.set_property("async", False) # Set properties for the inference engines - self.primary_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt") - self.secondary1_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carcolor.txt") - self.secondary2_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carmake.txt") - self.secondary3_inference.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_vehicletypes.txt") + self.primary_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt", + ) + self.secondary1_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carcolor.txt", + ) + self.secondary2_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carmake.txt", + ) + self.secondary3_inference.set_property( + "config-file-path", + "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_vehicletypes.txt", + ) # Set properties for the tracker tracker_config = configparser.ConfigParser() tracker_config.read("dstest2_tracker_config.txt") tracker_config.sections() - for key in tracker_config['tracker']: - if key == 'tracker-width': - tracker_width = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-width', tracker_width) - if key == 'tracker-height': - tracker_height = tracker_config.getint('tracker', key) - self.tracker.set_property('tracker-height', tracker_height) - if key == 'gpu-id': - tracker_gpu_id = tracker_config.getint('tracker', key) - self.tracker.set_property('gpu_id', tracker_gpu_id) - if key == 'll-lib-file': - tracker_ll_lib_file = tracker_config.get('tracker', key) - 
self.tracker.set_property('ll-lib-file', tracker_ll_lib_file) - if key == 'll-config-file': - tracker_ll_config_file = tracker_config.get('tracker', key) - self.tracker.set_property('ll-config-file', tracker_ll_config_file) - if key == 'enable-batch-process': - tracker_enable_batch_process = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_batch_process', tracker_enable_batch_process) - if key == 'enable-past-frame': - tracker_enable_past_frame = tracker_config.getint('tracker', key) - self.tracker.set_property('enable_past_frame', tracker_enable_past_frame) + for key in tracker_config["tracker"]: + if key == "tracker-width": + tracker_width = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-width", tracker_width) + if key == "tracker-height": + tracker_height = tracker_config.getint("tracker", key) + self.tracker.set_property("tracker-height", tracker_height) + if key == "gpu-id": + tracker_gpu_id = tracker_config.getint("tracker", key) + self.tracker.set_property("gpu_id", tracker_gpu_id) + if key == "ll-lib-file": + tracker_ll_lib_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-lib-file", tracker_ll_lib_file) + if key == "ll-config-file": + tracker_ll_config_file = tracker_config.get("tracker", key) + self.tracker.set_property("ll-config-file", tracker_ll_config_file) + if key == "enable-batch-process": + tracker_enable_batch_process = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_batch_process", tracker_enable_batch_process + ) + if key == "enable-past-frame": + tracker_enable_past_frame = tracker_config.getint("tracker", key) + self.tracker.set_property( + "enable_past_frame", tracker_enable_past_frame + ) # --- LINK IMAGE PROCESSING --- # Link video input and inference as follows: @@ -304,10 +346,12 @@ def __init__(self): demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() demuxer_pad_added.register("video_", self.video_queue, "sink") - assert self.demuxer.connect("pad-added", demuxer_pad_added) == True + assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None # Link video pipeline - gsthelpers.link_elements([self.video_queue, self.h264_parser, self.h264_decoder, self.decoder_queue]) + gsthelpers.link_elements( + [self.video_queue, self.h264_parser, self.h264_decoder, self.decoder_queue] + ) # Link decoder to streammux source = self.decoder_queue.get_static_pad("src") @@ -317,15 +361,19 @@ def __init__(self): assert source.link(sink) == Gst.PadLinkReturn.OK # Link inference, tracker and visualization - gsthelpers.link_elements([self.stream_muxer, - self.primary_inference, - self.tracker, - self.secondary1_inference, - self.secondary2_inference, - self.secondary3_inference, - self.video_converter, - self.osd, - self.tee]) + gsthelpers.link_elements( + [ + self.stream_muxer, + self.primary_inference, + self.tracker, + self.secondary1_inference, + self.secondary2_inference, + self.secondary3_inference, + self.video_converter, + self.osd, + self.tee, + ] + ) # --- LINK OUTPUT BRANCHES --- # We have two outputs, videosink and a filesink, as follows: @@ -351,10 +399,14 @@ def __init__(self): assert sink is not None assert src.link(sink) == Gst.PadLinkReturn.OK - gsthelpers.link_elements([self.filesink_queue, - self.file_sink_converter, - self.file_sink_encoder, - self.file_sink_parser]) + gsthelpers.link_elements( + [ + self.filesink_queue, + self.file_sink_converter, + self.file_sink_encoder, + self.file_sink_parser, + ] + ) src = 
self.file_sink_parser.get_static_pad("src") assert src is not None @@ -435,7 +487,9 @@ def on_message(self, bus, message): elif message_type == Gst.MessageType.STATE_CHANGED: old_state, new_state, pending_state = message.parse_state_changed() - print(f"State changed from {message.src.get_name()}: {old_state.value_nick} -> {new_state.value_nick}, pending: {pending_state.value_nick}") + print( + f"State changed from {message.src.get_name()}: {old_state.value_nick} -> {new_state.value_nick}, pending: {pending_state.value_nick}" + ) def stop_handler(self, sig, frame): """ @@ -449,10 +503,12 @@ def stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-o", "--output_file", help="output file path", default="output.mp4") + argParser.add_argument( + "-o", "--output_file", help="output file path", default="output.mp4" + ) args = argParser.parse_args() player = Player() diff --git a/deepstream-examples/deepstream-triton-tracking/tracker_config.yml b/deepstream-examples/deepstream-triton-tracking/tracker_config.yml index 6af8f06..3a6f550 100644 --- a/deepstream-examples/deepstream-triton-tracking/tracker_config.yml +++ b/deepstream-examples/deepstream-triton-tracking/tracker_config.yml @@ -62,7 +62,7 @@ NvDCF: minMatchingScore4SizeSimilarity: 0.5 # Min bbox size similarity score minMatchingScore4Iou: 0.1 # Min IOU score minMatchingScore4VisualSimilarity: 0.2 # Min visual similarity score - minTrackingConfidenceDuringInactive: 1.0 # Min tracking confidence during INACTIVE period. If tracking confidence is higher than this, then tracker will still output results until next detection + minTrackingConfidenceDuringInactive: 1.0 # Min tracking confidence during INACTIVE period. 
If tracking confidence is higher than this, then tracker will still output results until next detection # [Data Association] Weights for each matching score term matchingScoreWeight4VisualSimilarity: 0.8 # Weight for the visual similarity (in terms of correlation response ratio) diff --git a/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/CMakeLists.txt b/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/CMakeLists.txt index d7f6770..b30cade 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/CMakeLists.txt +++ b/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/CMakeLists.txt @@ -4,7 +4,7 @@ find_package(Deepstream REQUIRED) find_package(yaml-cpp REQUIRED) find_package(PkgConfig REQUIRED) -pkg_search_module(GST REQUIRED +pkg_search_module(GST REQUIRED gstreamer-1.0>=1.4 gstreamer-base-1.0>=1.4 gstreamer-video-1.0>=1.4) @@ -18,7 +18,7 @@ add_library(nvdsgst_inferrect SHARED ) target_include_directories(nvdsgst_inferrect - PUBLIC + PUBLIC $ $ PRIVATE diff --git a/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer.cpp b/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer.cpp index bc7ef5e..1cff294 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer.cpp +++ b/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer.cpp @@ -1410,7 +1410,7 @@ convert_batch_and_push_to_input_thread (GstNvInfer *nvinfer, }; NvBufSurface *surf_gray8 = NULL; - + if(NvBufSurfaceCreate(&surf_gray8, mem->surf->batchSize, &nvbufsurface_create_params) != 0) { std::cerr << "Failed to allocate space for surface 'surf_gray8'" << std::endl; diff --git a/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer_meta_utils.cpp b/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer_meta_utils.cpp index 992a055..166e628 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer_meta_utils.cpp +++ b/deepstream-examples/src/gst-plugins/deepstream6.0/gst-nvinfer-rect/gstnvinfer_meta_utils.cpp @@ -139,8 +139,8 @@ attach_metadata_detector (GstNvInfer * nvinfer, GstMiniObject * tensor_out_objec text_params.font_params.font_color = (NvOSD_ColorParams) { 1, 1, 1, 1}; - if (nvinfer->output_instance_mask && obj.mask) { - float *mask = (float *)g_malloc(obj.mask_size); + if (nvinfer->output_instance_mask && obj.mask) { + float *mask = (float *)g_malloc(obj.mask_size); memcpy(mask, obj.mask, obj.mask_size); obj_meta->mask_params.data = mask; obj_meta->mask_params.size = obj.mask_size; diff --git a/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/CMakeLists.txt b/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/CMakeLists.txt index 4b8849a..e0e9356 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/CMakeLists.txt +++ b/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(nvdsgst_inferrect SHARED ) target_include_directories(nvdsgst_inferrect - PUBLIC + PUBLIC $ $ PRIVATE diff --git a/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/gstnvinfer.cpp b/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/gstnvinfer.cpp index 7a57c81..71eeb74 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/gstnvinfer.cpp +++ b/deepstream-examples/src/gst-plugins/deepstream6.1/gst-nvinfer-rect/gstnvinfer.cpp @@ 
-1473,7 +1473,7 @@ convert_batch_and_push_to_input_thread(GstNvInfer *nvinfer, (NULL)); return FALSE; } - + //TODO: Saves images only when we are not processing a full frame -> secondary inference if((!nvinfer->process_full_frame) && (mem->surf->numFilled>0)) { @@ -1502,7 +1502,7 @@ convert_batch_and_push_to_input_thread(GstNvInfer *nvinfer, }; NvBufSurface *surf_gray8 = NULL; - + if(NvBufSurfaceCreate(&surf_gray8, mem->surf->batchSize, &nvbufsurface_create_params) != 0) { std::cerr << "Failed to allocate space for surface 'surf_gray8'" << std::endl; @@ -1865,7 +1865,7 @@ gst_nvinfer_process_objects(GstNvInfer *nvinfer, GstBuffer *inbuf, bool needs_infer = should_infer_object(nvinfer, inbuf, object_meta, frame_num, obj_history.get()); - + if (!needs_infer) { /* Should not infer again. */ diff --git a/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/CMakeLists.txt b/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/CMakeLists.txt index 4b8849a..e0e9356 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/CMakeLists.txt +++ b/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/CMakeLists.txt @@ -15,7 +15,7 @@ add_library(nvdsgst_inferrect SHARED ) target_include_directories(nvdsgst_inferrect - PUBLIC + PUBLIC $ $ PRIVATE diff --git a/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/gstnvinfer.cpp b/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/gstnvinfer.cpp index 7a57c81..71eeb74 100644 --- a/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/gstnvinfer.cpp +++ b/deepstream-examples/src/gst-plugins/deepstream6.2/gst-nvinfer-rect/gstnvinfer.cpp @@ -1473,7 +1473,7 @@ convert_batch_and_push_to_input_thread(GstNvInfer *nvinfer, (NULL)); return FALSE; } - + //TODO: Saves images only when we are not processing a full frame -> secondary inference if((!nvinfer->process_full_frame) && (mem->surf->numFilled>0)) { @@ -1502,7 +1502,7 @@ convert_batch_and_push_to_input_thread(GstNvInfer *nvinfer, }; NvBufSurface *surf_gray8 = NULL; - + if(NvBufSurfaceCreate(&surf_gray8, mem->surf->batchSize, &nvbufsurface_create_params) != 0) { std::cerr << "Failed to allocate space for surface 'surf_gray8'" << std::endl; @@ -1865,7 +1865,7 @@ gst_nvinfer_process_objects(GstNvInfer *nvinfer, GstBuffer *inbuf, bool needs_infer = should_infer_object(nvinfer, inbuf, object_meta, frame_num, obj_history.get()); - + if (!needs_infer) { /* Should not infer again. 
*/ diff --git a/deepstream-examples/src/retinaface_parser/nvdsparse_retinaface.cpp b/deepstream-examples/src/retinaface_parser/nvdsparse_retinaface.cpp index 8938eca..254855c 100644 --- a/deepstream-examples/src/retinaface_parser/nvdsparse_retinaface.cpp +++ b/deepstream-examples/src/retinaface_parser/nvdsparse_retinaface.cpp @@ -75,7 +75,7 @@ struct alignas(float) Bbox { Bbox() : top_left(Point2D()), bottom_right(Point2D()) {} - + Bbox(Point2D top_left_in, Point2D bottom_right_in) : top_left(top_left_in), bottom_right(bottom_right_in) {} @@ -141,7 +141,7 @@ float IoU(Bbox const& a, Bbox const& b) // Maximum coordinates of top-left point float x_min = std::max(a.top_left.x, b.top_left.x); float y_min = std::max(a.top_left.y, b.top_left.y); - + // Width and height for intersection float w = std::max(0.0f, x_max - x_min); float h = std::max(0.0f, y_max - y_min); @@ -189,7 +189,7 @@ std::size_t NMS(std::list& index_list, Bbox* p_bbox, float index_list.pop_front(); // Add those bounding boxes that overlap enough with the candidate to the list of candidates - std::copy_if(index_list.begin(), index_list.end(), std::back_inserter(candidates), + std::copy_if(index_list.begin(), index_list.end(), std::back_inserter(candidates), [&candidate, &min_iou_threshold, p_bbox](IndexWithProbability& elem){return IoU(p_bbox[candidate.index], p_bbox[elem.index]) > min_iou_threshold;}); // Erase the overlapping items from the index_list @@ -229,7 +229,7 @@ static bool NvDsInferParseRetinaface(std::vector const &outp std::cerr << "Could not find an output layer called 'bboxes'" << std::endl; return false; } - + // Look for the classes layer auto itr_class = std::find_if(outputLayersInfo.begin(), outputLayersInfo.end(), [](const NvDsInferLayerInfo& obj){ return std::string(obj.layerName) == "classes";}); if(itr_class == outputLayersInfo.end()) @@ -237,7 +237,7 @@ static bool NvDsInferParseRetinaface(std::vector const &outp std::cerr << "Could not find an output layer called 'classes'" << std::endl; return false; } - + // Look for the landmarks layer auto itr_landmark = std::find_if(outputLayersInfo.begin(), outputLayersInfo.end(), [](const NvDsInferLayerInfo& obj){ return std::string(obj.layerName) == "landmarks";}); if(itr_landmark == outputLayersInfo.end()) diff --git a/deepstream-examples/src/utils/CMakeLists.txt b/deepstream-examples/src/utils/CMakeLists.txt index acb59b1..84b6c76 100644 --- a/deepstream-examples/src/utils/CMakeLists.txt +++ b/deepstream-examples/src/utils/CMakeLists.txt @@ -6,7 +6,7 @@ find_package(Deepstream REQUIRED) add_library(nvds_tools SHARED nvbufsurf_tools.hpp nvbufsurf_tools.cpp) target_include_directories(nvds_tools - PUBLIC + PUBLIC $ $ PRIVATE diff --git a/deepstream-examples/src/utils/nvbufsurf_tools.cpp b/deepstream-examples/src/utils/nvbufsurf_tools.cpp index 9cb5959..a31fee0 100644 --- a/deepstream-examples/src/utils/nvbufsurf_tools.cpp +++ b/deepstream-examples/src/utils/nvbufsurf_tools.cpp @@ -59,7 +59,7 @@ int write_surfgray8_to_disk(NvBufSurface* surf, const char* filename, bool use_p { std::string bufferFileName = fileName + "_object_" + std::to_string(i) + ".bmp"; cv::Mat mapped; - + if(use_pitch_alignment) { mapped = cv::Mat(drawMe->surfaceList[i].height, drawMe->surfaceList[i].width, CV_8UC1, drawMe->surfaceList[i].dataPtr, drawMe->surfaceList[i].pitch); }else{ diff --git a/docker/Dockerfile-deepstream b/docker/Dockerfile-deepstream index 5a99743..3d77034 100644 --- a/docker/Dockerfile-deepstream +++ b/docker/Dockerfile-deepstream @@ -3,7 +3,7 @@ FROM 
nvcr.io/nvidia/deepstream:6.1.1-samples # To get video driver libraries at runtime (libnvidia-encode.so/libnvcuvid.so) ENV NVIDIA_DRIVER_CAPABILITIES $NVIDIA_DRIVER_CAPABILITIES,video,compute,graphics,utility -# Install required packages. +# Install required packages. # Some of these are probably already in the base image. # GL Vendor-Neutral Dispatch: libglvnd0, libgl1, libglx0 and libegl1 # OpenGL benchmark application: glmark2 diff --git a/docker/Dockerfile-deepstream-6.0.1-devel b/docker/Dockerfile-deepstream-6.0.1-devel index d79fed1..64984dd 100644 --- a/docker/Dockerfile-deepstream-6.0.1-devel +++ b/docker/Dockerfile-deepstream-6.0.1-devel @@ -3,7 +3,7 @@ FROM nvcr.io/nvidia/deepstream:6.0.1-samples # To get video driver libraries at runtime (libnvidia-encode.so/libnvcuvid.so) ENV NVIDIA_DRIVER_CAPABILITIES $NVIDIA_DRIVER_CAPABILITIES,video,compute,graphics,utility -# Install required packages. +# Install required packages. # Some of these are probably already in the base image. # GL Vendor-Neutral Dispatch: libglvnd0, libgl1, libglx0 and libegl1 # OpenGL benchmark application: glmark2 diff --git a/docker/README.md b/docker/README.md index a0862e8..74f2b5f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -14,3 +14,53 @@ This directory contains docker files used for generating docker containers where * mesa-utils for glxinfo * cuda-tookit * tensorrt-dev + +# 1 Creating Docker Images + +Following sections show how to: + +* Install Docker +* Build the Docker images + +## 1.1 Installing Docker + +Before creating the docker image, you need to install Nvidia's Container Toolkit. Instructions can be found here: + +* https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html + +Once you have installed everything, verify that Nvidia's Container Toolkit is working by executing: + +```bash +sudo docker run --rm --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi +``` + +You should see output following (or similar) output: + +```bash ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 525.60.13 Driver Version: 525.60.13 CUDA Version: 12.0 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|===============================+======================+======================| +| 0 NVIDIA GeForce ... On | 00000000:09:00.0 On | N/A | +| 32% 38C P0 34W / 151W | 735MiB / 8192MiB | 0% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ + ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| ++-----------------------------------------------------------------------------+ + +``` + +## 1.2 Create the Docker Image + +After this you can create the docker image used in the examples. + +```bash +docker build -t nvidia-deepstream-samples -f ./Dockerfile-deepstream . diff --git a/gst-examples/README.md b/gst-examples/README.md index b6cf4f7..0d7523c 100644 --- a/gst-examples/README.md +++ b/gst-examples/README.md @@ -1,17 +1,22 @@ # GST-EXAMPLES -These are Gstreamer related examples. +These are Gstreamer related examples. 
Before running the examples, it is a good idea to refresh the GStreamer plugin cache by running the following: -## Requirements +```bash +gst-inspect-1.0 +``` + +# 1 Requirements * Python 3.8 * Gst-python -* gstreamer1.0-plugins-bad (you probably need this) +* gstreamer1.0-plugins-bad +* gstreamer1.0-libav * The following are needed only for PyTorch related examples * torch * torchvision -## Examples +# 2 Examples * [gst-qtdemux-h264.py](gst-qtdemux-h264.py) * Plays back h264 encoded video stream from a file (e.g. mp4). diff --git a/gst-examples/gst-pytorch-example-1.1.py b/gst-examples/gst-pytorch-example-1.1.py index 1312151..0d1fedc 100644 --- a/gst-examples/gst-pytorch-example-1.1.py +++ b/gst-examples/gst-pytorch-example-1.1.py @@ -14,11 +14,13 @@ import time from functools import partial -gi.require_version('Gst', '1.0') -from gi.repository import Gst +gi.require_version("Gst", "1.0") +from gi.repository import Gst # noqa: E402 -frame_format, pixel_bytes, model_precision = 'RGBA', 4, 'fp32' -ssd_utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd_processing_utils') +frame_format, pixel_bytes, model_precision = "RGBA", 4, "fp32" +ssd_utils = torch.hub.load( + "NVIDIA/DeepLearningExamples:torchhub", "nvidia_ssd_processing_utils" +) start_time, frames_processed = None, 0 @@ -31,7 +33,9 @@ def nvtx_range(msg): torch.cuda.nvtx.range_pop() -def on_frame_probe(pad_in, info_in, detector_in, transform_in, device_in, detection_threshold_in): +def on_frame_probe( + pad_in, info_in, detector_in, transform_in, device_in, detection_threshold_in +): """ This function is called every time a new frame is available. :param pad_in: pad of the probe @@ -62,8 +66,10 @@ def on_frame_probe(pad_in, info_in, detector_in, transform_in, device_in, detect # Since the decoding is done in the cpu, it is much more efficient to send to complete tensor to cpu, and then # decode the results. 
results_per_input = ssd_utils.decode_results((locs.cpu(), labels.cpu())) - best_results_per_input = [ssd_utils.pick_best(results, detection_threshold_in) for results in - results_per_input] + best_results_per_input = [ + ssd_utils.pick_best(results, detection_threshold_in) + for results in results_per_input + ] for bboxes, classes, scores in best_results_per_input: print(f"{bboxes=}") @@ -83,7 +89,7 @@ def buffer_to_numpy(buf, caps): """ with nvtx_range("buffer_to_image_tensor"): caps_struct = caps.get_structure(0) - width, height = caps_struct.get_value('width'), caps_struct.get_value('height') + width, height = caps_struct.get_value("width"), caps_struct.get_value("height") is_mapped, map_info = buf.map(Gst.MapFlags.READ) image_array = None @@ -91,9 +97,7 @@ def buffer_to_numpy(buf, caps): if is_mapped: try: image_array = np.ndarray( - (height, width, pixel_bytes), - dtype=np.uint8, - buffer=map_info.data + (height, width, pixel_bytes), dtype=np.uint8, buffer=map_info.data )[:, :, :3].copy() finally: buf.unmap(map_info) @@ -104,27 +108,41 @@ def buffer_to_numpy(buf, caps): if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-d", "--detection_threshold", help="detection threshold", default=0.4, type=float) + argParser.add_argument( + "-d", + "--detection_threshold", + help="detection threshold", + default=0.4, + type=float, + ) args = argParser.parse_args() - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - detector = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math='fp32').eval().to(device) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + detector = ( + torch.hub.load( + "NVIDIA/DeepLearningExamples:torchhub", "nvidia_ssd", model_math="fp32" + ) + .eval() + .to(device) + ) # Preprocessing - transform = T.Compose([ - T.ToPILImage(), - T.Resize((300, 300)), - T.ToTensor(), - T.Normalize(mean=(0.485, 0.456, 0.406), std=[0.229, 0.224, 0.225]) - ]) - - pipeline_definition = f''' + transform = T.Compose( + [ + T.ToPILImage(), + T.Resize((300, 300)), + T.ToTensor(), + T.Normalize(mean=(0.485, 0.456, 0.406), std=[0.229, 0.224, 0.225]), + ] + ) + + pipeline_definition = f""" filesrc location={args.input_file} ! decodebin ! nvvideoconvert ! video/x-raw, format={frame_format} ! 
fakesink name=fake_sink - ''' + """ print("--- PIPELINE DEFINITION ---") print(pipeline_definition) @@ -132,19 +150,25 @@ def buffer_to_numpy(buf, caps): pipeline = Gst.parse_launch(pipeline_definition) # Add probe to fake sink for capturing and processing frames - pipeline.get_by_name('fake_sink').get_static_pad('sink').add_probe(Gst.PadProbeType.BUFFER, - partial(on_frame_probe, - detector_in=detector, - transform_in=transform, - device_in=device, - detection_threshold_in=args.detection_threshold)) + pipeline.get_by_name("fake_sink").get_static_pad("sink").add_probe( + Gst.PadProbeType.BUFFER, + partial( + on_frame_probe, + detector_in=detector, + transform_in=transform, + device_in=device, + detection_threshold_in=args.detection_threshold, + ), + ) pipeline.set_state(Gst.State.PLAYING) try: while True: - msg = pipeline.get_bus().timed_pop_filtered(Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR) + msg = pipeline.get_bus().timed_pop_filtered( + Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR + ) if msg: - text = msg.get_structure().to_string() if msg.get_structure() else '' + text = msg.get_structure().to_string() if msg.get_structure() else "" msg_type = Gst.message_type_get_name(msg.type) print(f"{msg.src.name}: [{msg.type}] {text}") break diff --git a/gst-examples/gst-pytorch-example-1.py b/gst-examples/gst-pytorch-example-1.py index 86dbec0..ebe34c3 100644 --- a/gst-examples/gst-pytorch-example-1.py +++ b/gst-examples/gst-pytorch-example-1.py @@ -14,11 +14,13 @@ import time from functools import partial -gi.require_version('Gst', '1.0') -from gi.repository import Gst +gi.require_version("Gst", "1.0") +from gi.repository import Gst # noqa: E402 -frame_format, pixel_bytes, model_precision = 'RGBA', 4, 'fp32' -ssd_utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd_processing_utils') +frame_format, pixel_bytes, model_precision = "RGBA", 4, "fp32" +ssd_utils = torch.hub.load( + "NVIDIA/DeepLearningExamples:torchhub", "nvidia_ssd_processing_utils" +) start_time, frames_processed = None, 0 @@ -31,7 +33,9 @@ def nvtx_range(msg): torch.cuda.nvtx.range_pop() -def on_frame_probe(pad_in, info_in, detector_in, transform_in, device_in, detection_threshold_in): +def on_frame_probe( + pad_in, info_in, detector_in, transform_in, device_in, detection_threshold_in +): """ This function is called every time a new frame is available. 
:param pad_in: pad of the probe @@ -60,8 +64,10 @@ def on_frame_probe(pad_in, info_in, detector_in, transform_in, device_in, detect with nvtx_range("postprocessing"): results_per_input = ssd_utils.decode_results(detections) - best_results_per_input = [ssd_utils.pick_best(results, detection_threshold_in) for results in - results_per_input] + best_results_per_input = [ + ssd_utils.pick_best(results, detection_threshold_in) + for results in results_per_input + ] for bboxes, classes, scores in best_results_per_input: print(f"{bboxes=}") @@ -81,7 +87,7 @@ def buffer_to_numpy(buf, caps): """ with nvtx_range("buffer_to_image_tensor"): caps_struct = caps.get_structure(0) - width, height = caps_struct.get_value('width'), caps_struct.get_value('height') + width, height = caps_struct.get_value("width"), caps_struct.get_value("height") is_mapped, map_info = buf.map(Gst.MapFlags.READ) image_array = None @@ -89,9 +95,7 @@ def buffer_to_numpy(buf, caps): if is_mapped: try: image_array = np.ndarray( - (height, width, pixel_bytes), - dtype=np.uint8, - buffer=map_info.data + (height, width, pixel_bytes), dtype=np.uint8, buffer=map_info.data )[:, :, :3].copy() finally: buf.unmap(map_info) @@ -102,27 +106,41 @@ def buffer_to_numpy(buf, caps): if __name__ == "__main__": argParser = argparse.ArgumentParser() argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument("-d", "--detection_threshold", help="detection threshold", default=0.4, type=float) + argParser.add_argument( + "-d", + "--detection_threshold", + help="detection threshold", + default=0.4, + type=float, + ) args = argParser.parse_args() - device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - detector = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math='fp32').eval().to(device) + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + detector = ( + torch.hub.load( + "NVIDIA/DeepLearningExamples:torchhub", "nvidia_ssd", model_math="fp32" + ) + .eval() + .to(device) + ) # Preprocessing - transform = T.Compose([ - T.ToPILImage(), - T.Resize((300, 300)), - T.ToTensor(), - T.Normalize(mean=(0.485, 0.456, 0.406), std=[0.229, 0.224, 0.225]) - ]) - - pipeline_definition = f''' + transform = T.Compose( + [ + T.ToPILImage(), + T.Resize((300, 300)), + T.ToTensor(), + T.Normalize(mean=(0.485, 0.456, 0.406), std=[0.229, 0.224, 0.225]), + ] + ) + + pipeline_definition = f""" filesrc location={args.input_file} ! decodebin ! nvvideoconvert ! video/x-raw, format={frame_format} ! 
fakesink name=fake_sink - ''' + """ print("--- PIPELINE DEFINITION ---") print(pipeline_definition) @@ -130,19 +148,25 @@ def buffer_to_numpy(buf, caps): pipeline = Gst.parse_launch(pipeline_definition) # Add probe to fake sink for capturing and processing frames - pipeline.get_by_name('fake_sink').get_static_pad('sink').add_probe(Gst.PadProbeType.BUFFER, - partial(on_frame_probe, - detector_in=detector, - transform_in=transform, - device_in=device, - detection_threshold_in=args.detection_threshold)) + pipeline.get_by_name("fake_sink").get_static_pad("sink").add_probe( + Gst.PadProbeType.BUFFER, + partial( + on_frame_probe, + detector_in=detector, + transform_in=transform, + device_in=device, + detection_threshold_in=args.detection_threshold, + ), + ) pipeline.set_state(Gst.State.PLAYING) try: while True: - msg = pipeline.get_bus().timed_pop_filtered(Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR) + msg = pipeline.get_bus().timed_pop_filtered( + Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR + ) if msg: - text = msg.get_structure().to_string() if msg.get_structure() else '' + text = msg.get_structure().to_string() if msg.get_structure() else "" msg_type = Gst.message_type_get_name(msg.type) print(f"{msg.src.name}: [{msg.type}] {text}") break diff --git a/gst-examples/gst-qtdemux-h264-avdec_aac.py b/gst-examples/gst-qtdemux-h264-avdec_aac.py index e90a2b4..3721edc 100644 --- a/gst-examples/gst-qtdemux-h264-avdec_aac.py +++ b/gst-examples/gst-qtdemux-h264-avdec_aac.py @@ -15,12 +15,12 @@ import os import sys import signal -from helpers import * +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 class Player(object): @@ -57,14 +57,18 @@ def __init__(self): self.video_queue = gsthelpers.create_element("queue", "video-queue") self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser") self.h264_decoder = gsthelpers.create_element("avdec_h264", "h264-decoder") - self.video_converter = gsthelpers.create_element("videoconvert", "video-converter") - self.image_sink = gsthelpers.create_element("xvimagesink", "image-sink") + self.video_converter = gsthelpers.create_element( + "videoconvert", "video-converter" + ) + self.image_sink = gsthelpers.create_element("autovideosink", "image-sink") # Audio pipeline self.audio_queue = gsthelpers.create_element("queue", "audio-queue") self.audio_decoder = gsthelpers.create_element("avdec_aac", "avdec_aac") self.audio_convert = gsthelpers.create_element("audioconvert", "audio-convert") - self.audio_resample = gsthelpers.create_element("audioresample", "audio-resample") + self.audio_resample = gsthelpers.create_element( + "audioresample", "audio-resample" + ) self.audio_sink = gsthelpers.create_element("autoaudiosink", "audio-sink") # Add elements to the pipeline @@ -85,18 +89,26 @@ def __init__(self): gsthelpers.link_elements([self.source, self.demuxer]) # Link video pipeline - gsthelpers.link_elements([self.video_queue, - self.h264_parser, - self.h264_decoder, - self.video_converter, - self.image_sink]) + gsthelpers.link_elements( + [ + self.video_queue, + self.h264_parser, + self.h264_decoder, + self.video_converter, + self.image_sink, + ] + ) # Link audio pipeline - gsthelpers.link_elements([self.audio_queue, - self.audio_decoder, - self.audio_convert, - self.audio_resample, - self.audio_sink]) + gsthelpers.link_elements( + [ + self.audio_queue, + self.audio_decoder, + 
self.audio_convert, + self.audio_resample, + self.audio_sink, + ] + ) # Connect demux to the pad-added signal, used to link demuxer to queues dynamically pad_added_functor = gsthelpers.PadAddedLinkFunctor() @@ -172,9 +184,11 @@ def stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() - argParser.add_argument("-i", "--input_file", help="path to the file to be played back", default="") + argParser.add_argument( + "-i", "--input_file", help="path to the file to be played back", default="" + ) args = argParser.parse_args() player = Player() diff --git a/gst-examples/gst-qtdemux-h264.py b/gst-examples/gst-qtdemux-h264.py index 75e3852..7f512dc 100644 --- a/gst-examples/gst-qtdemux-h264.py +++ b/gst-examples/gst-qtdemux-h264.py @@ -14,12 +14,12 @@ import os import sys import signal -from helpers import * +from helpers import gsthelpers import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst, GLib # noqa: E402 class Player(object): @@ -56,8 +56,10 @@ def __init__(self): self.video_queue = gsthelpers.create_element("queue", "video-queue") self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser") self.h264_decoder = gsthelpers.create_element("avdec_h264", "h264-decoder") - self.video_converter = gsthelpers.create_element("videoconvert", "video-converter") - self.image_sink = gsthelpers.create_element("xvimagesink", "image-sink") + self.video_converter = gsthelpers.create_element( + "videoconvert", "video-converter" + ) + self.image_sink = gsthelpers.create_element("autovideosink", "image-sink") # Add elements to the pipeline self.pipeline.add(self.source) @@ -72,11 +74,15 @@ def __init__(self): gsthelpers.link_elements([self.source, self.demuxer]) # Link video pipeline - gsthelpers.link_elements([self.video_queue, - self.h264_parser, - self.h264_decoder, - self.video_converter, - self.image_sink]) + gsthelpers.link_elements( + [ + self.video_queue, + self.h264_parser, + self.h264_decoder, + self.video_converter, + self.image_sink, + ] + ) # Connect demux to the pad-added signal, used to link queue to parser dynamically pad_added_functor = gsthelpers.PadAddedLinkFunctor() @@ -152,9 +158,11 @@ def stop_handler(self, sig, frame): self.stop() -if __name__ == '__main__': +if __name__ == "__main__": argParser = argparse.ArgumentParser() - argParser.add_argument("-i", "--input_file", help="path to the file to be played back") + argParser.add_argument( + "-i", "--input_file", help="path to the file to be played back" + ) args = argParser.parse_args() player = Player() diff --git a/hailo-examples/README.md b/hailo-examples/README.md index 3c4d25a..d49024f 100644 --- a/hailo-examples/README.md +++ b/hailo-examples/README.md @@ -1,7 +1,7 @@ # Hailo Examples This directory contains some examples regarding how to use [Hailo-8](https://hailo.ai/products/hailo-8/) accelerator and Hailo [Tappas](https://github.com/hailo-ai/tappas). -Tappas can be used for creating image processing pipelines using GStreamer with Hailo accelerators. +Tappas can be used for creating image processing pipelines using GStreamer with Hailo accelerators. 
--- @@ -14,7 +14,7 @@ Following is a list of Hailo related examples: --- -# 3 How to Run the Examples +# 2 How to Run the Examples All the examples have been tested using the following: diff --git a/hailo-examples/tracking/README.md b/hailo-examples/tracking/README.md index 32e79cc..0275ba8 100644 --- a/hailo-examples/tracking/README.md +++ b/hailo-examples/tracking/README.md @@ -41,7 +41,7 @@ videoconvert n-threads=2 qos=false ! \ fpsdisplaysink video-sink=xvimagesink name=hailo_display sync=false ``` -If you want to display the video in normal speed, remove `sync=false` from the `fpsdisplaysink`. +If you want to display the video in normal speed, remove `sync=false` from the `fpsdisplaysink`. ## Running the Example with Filesink diff --git a/helper-package/src/helpers/__init__.py b/helper-package/src/helpers/__init__.py index c0a2aed..fb15c37 100644 --- a/helper-package/src/helpers/__init__.py +++ b/helper-package/src/helpers/__init__.py @@ -1 +1 @@ -__all__ = ['gsthelpers'] +__all__ = ["gsthelpers"] diff --git a/helper-package/src/helpers/gsthelpers.py b/helper-package/src/helpers/gsthelpers.py index 6be719d..73ffe3e 100644 --- a/helper-package/src/helpers/gsthelpers.py +++ b/helper-package/src/helpers/gsthelpers.py @@ -1,7 +1,7 @@ import gi -gi.require_version('Gst', '1.0') -from gi.repository import Gst, GLib, GObject +gi.require_version("Gst", "1.0") +from gi.repository import Gst # noqa: E402 def create_element(gst_elem: str, name: str): @@ -35,14 +35,23 @@ def link_elements(elements: list) -> None: assert len(elements) >= 2 for idx, x in enumerate(elements[:-1]): - print(f"Linking element {elements[idx].get_name()} -> {elements[idx + 1].get_name()}...", end="") - assert isinstance(elements[idx], Gst.Element), "elements[idx] must be of type Gst.Element" - assert isinstance(elements[idx + 1], Gst.Element), "elements[idx+1] must be of type Gst.Element" + print( + f"Linking element {elements[idx].get_name()} -> {elements[idx + 1].get_name()}...", + end="", + ) + assert isinstance( + elements[idx], Gst.Element + ), "elements[idx] must be of type Gst.Element" + assert isinstance( + elements[idx + 1], Gst.Element + ), "elements[idx+1] must be of type Gst.Element" if elements[idx].link(elements[idx + 1]): print("done") else: print("failed") - raise RuntimeError(f"Failed to link: {elements[idx].get_name()} -> {elements[idx + 1].get_name()}") + raise RuntimeError( + f"Failed to link: {elements[idx].get_name()} -> {elements[idx + 1].get_name()}" + ) class PadAddedLinkFunctor: @@ -55,7 +64,9 @@ class PadAddedLinkFunctor: def __init__(self): self.connections = [] - def register(self, new_pad: str, target_element: Gst.Element, target_sink_name: str) -> None: + def register( + self, new_pad: str, target_element: Gst.Element, target_sink_name: str + ) -> None: """ Registers linking information indicating how new pads should be linked to subsequent elements. 
@@ -74,8 +85,12 @@ def register(self, new_pad: str, target_element: Gst.Element, target_sink_name: """ assert isinstance(new_pad, str), "'new_pad' must be of type str" - assert isinstance(target_element, Gst.Element), "'target_element' must be of type Gst.Element" - assert isinstance(target_sink_name, str), "'target_sink_name' must be of type str" + assert isinstance( + target_element, Gst.Element + ), "'target_element' must be of type Gst.Element" + assert isinstance( + target_sink_name, str + ), "'target_sink_name' must be of type str" self.connections.append((new_pad, target_element, target_sink_name)) @@ -104,11 +119,16 @@ def __call__(self, element: Gst.Element, pad: Gst.Pad) -> None: target_sink_name = self.connections[index][2] sink_pad = target_element.get_static_pad(target_sink_name) - assert sink_pad, f"'{target_element.get_name()}' has no static pad called '{target_sink_name}'" + assert ( + sink_pad + ), f"'{target_element.get_name()}' has no static pad called '{target_sink_name}'" if not sink_pad.is_linked(): - print(f"Linking '{element_name}:{pad_name}' \ - -> '{target_element.get_name()}:{sink_pad.get_name()}'...", end="") + print( + f"Linking '{element_name}:{pad_name}' \ + -> '{target_element.get_name()}:{sink_pad.get_name()}'...", + end="", + ) ret = pad.link(sink_pad) if ret == Gst.PadLinkReturn.OK: print("done") @@ -116,6 +136,8 @@ def __call__(self, element: Gst.Element, pad: Gst.Pad) -> None: print("error") elif len(index) > 1: - raise RuntimeError(f"Pad '{pad_name}' corresponds to several link-definitions, cannot continue") + raise RuntimeError( + f"Pad '{pad_name}' corresponds to several link-definitions, cannot continue" + ) else: return
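The reformatted `gsthelpers.py` above keeps the behaviour of `PadAddedLinkFunctor` unchanged: `register()` records which dynamically created pad should be connected to which downstream element, and the functor object itself is used as the `pad-added` callback. Below is a minimal sketch of that usage; it is not part of the patch, and the element names as well as the `video_0`/`audio_0` pad names are illustrative assumptions based on `qtdemux`'s usual pad naming.

```python
from helpers import gsthelpers
import gi

gi.require_version("Gst", "1.0")
from gi.repository import Gst  # noqa: E402

Gst.init(None)

# Build a skeleton pipeline: filesrc -> qtdemux -> {video queue, audio queue}.
# Decoders and sinks are omitted for brevity; see gst-qtdemux-h264-avdec_aac.py
# for a complete player.
pipeline = Gst.Pipeline.new("demo-pipeline")
source = gsthelpers.create_element("filesrc", "source")
demuxer = gsthelpers.create_element("qtdemux", "demuxer")
video_queue = gsthelpers.create_element("queue", "video-queue")
audio_queue = gsthelpers.create_element("queue", "audio-queue")

for element in (source, demuxer, video_queue, audio_queue):
    pipeline.add(element)

gsthelpers.link_elements([source, demuxer])

# qtdemux creates its source pads only after the stream has been parsed, so the
# queues are linked lazily: register the intended connections and let the
# functor handle the "pad-added" signal.
pad_added_functor = gsthelpers.PadAddedLinkFunctor()
pad_added_functor.register("video_0", video_queue, "sink")  # assumed pad name
pad_added_functor.register("audio_0", audio_queue, "sink")  # assumed pad name
demuxer.connect("pad-added", pad_added_functor)
```

Because `register()` asserts on the argument types and `link_elements()` raises a `RuntimeError` on a failed link, wiring mistakes surface immediately instead of producing a silently broken pipeline.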
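The gst-pytorch example reformatted earlier in this patch relies on the same probe-with-`functools.partial` pattern to feed decoded frames to the detector. As a self-contained illustration, again not part of the patch and with a hypothetical `counter_in` argument standing in for the detector-related keyword arguments, the pattern looks like this:

```python
from functools import partial

import gi

gi.require_version("Gst", "1.0")
from gi.repository import Gst  # noqa: E402


def on_frame_probe(pad, info, counter_in):
    # Called for every buffer that reaches the fakesink's sink pad.
    counter_in["frames"] += 1
    return Gst.PadProbeReturn.OK


Gst.init(None)
pipeline = Gst.parse_launch("videotestsrc num-buffers=30 ! fakesink name=fake_sink")
counter = {"frames": 0}

# Bind extra arguments to the probe callback with functools.partial, just as the
# example binds the detector, the transform, the device and the detection threshold.
pipeline.get_by_name("fake_sink").get_static_pad("sink").add_probe(
    Gst.PadProbeType.BUFFER,
    partial(on_frame_probe, counter_in=counter),
)

pipeline.set_state(Gst.State.PLAYING)
pipeline.get_bus().timed_pop_filtered(
    Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS | Gst.MessageType.ERROR
)
pipeline.set_state(Gst.State.NULL)
print(f"Processed {counter['frames']} buffers")
```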