From 282c3e0c016e0e055fb0df344d2c524e03a154ef Mon Sep 17 00:00:00 2001 From: Jarno Ralli Date: Mon, 17 Feb 2025 21:26:53 +0200 Subject: [PATCH] Feature/triton jetson support (#23) * Testing Jetson support in gst-triton-parallel-tracking-v2.py * Added queues * triton parallel tracking v1 now uses nvmultiurisrcbin, gst-tracking-v2 uses nvurisrcbin * Fix for writing out the file * Added examples using gst-launch-1.0 * Run pre-commit * Examples & improvements --- .gitignore | 6 + deepstream-examples/README.md | 14 +- .../deepstream-tracking-parallel/README.md | 86 ++- .../figures/multi_input_pipeline.drawio | 166 ++++++ .../figures/multi_input_pipeline.png | Bin 0 -> 70563 bytes .../deepstream-tracking/README.md | 39 +- .../config_tracker_NvDCF_perf_uniqueid.yml | 74 +++ .../dstest2_tracker_config.txt | 4 +- .../deepstream-tracking/gst-tracking-v2.py | 364 +++++++----- .../deepstream-triton-tracking/README.md | 98 +++- .../gst-triton-parallel-tracking-v1.py | 551 +++++++++--------- .../gst-triton-parallel-tracking-v2.py | 151 +++-- docker/README.md | 69 ++- gst-examples/gst-rtsp-server.py | 181 ------ gst-examples/rtsp-server/Dockerfile | 22 + gst-examples/rtsp-server/README.md | 76 +++ gst-examples/rtsp-server/docker-compose.yml | 10 + gst-examples/rtsp-server/rtsp-server.py | 164 ++++++ helper-package/src/helpers/gsthelpers.py | 26 +- 19 files changed, 1405 insertions(+), 696 deletions(-) create mode 100644 deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.drawio create mode 100644 deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.png create mode 100644 deepstream-examples/deepstream-tracking/config_tracker_NvDCF_perf_uniqueid.yml delete mode 100644 gst-examples/gst-rtsp-server.py create mode 100644 gst-examples/rtsp-server/Dockerfile create mode 100644 gst-examples/rtsp-server/README.md create mode 100644 gst-examples/rtsp-server/docker-compose.yml create mode 100644 gst-examples/rtsp-server/rtsp-server.py diff --git a/.gitignore b/.gitignore index 7f78a6e..e7024e3 100644 --- a/.gitignore +++ b/.gitignore @@ -134,6 +134,9 @@ dmypy.json # mp4 video files *.mp4 +# mkv video files +*.mkv + # Gaphviz dot files *.dot @@ -149,3 +152,6 @@ dmypy.json # VS code .vscode/ + +# TensorRT Engine files +*.engine diff --git a/deepstream-examples/README.md b/deepstream-examples/README.md index ade952e..6cd4869 100644 --- a/deepstream-examples/README.md +++ b/deepstream-examples/README.md @@ -15,14 +15,14 @@ List of examples: * [deepstream-tracking](deepstream-tracking/README.md) * 4-class object detector with tracking - * Tested with deepstream 6.1 + * Tested with deepstream 6.3 * [deepstream-tracking-parallel](deepstream-tracking-parallel/README.md) * 4-class object detector with tracking * Splits the input stream into two and runs two pipelines on the split streams * Tested with deepstream 6.1 * [deepstream-triton-tracking](deepstream-triton-tracking/README.md) * 4-class object detector with tracking, uses local version of the Triton Inference Server for inference - * Tested with deepstream 6.1 + * Tested with deepstream 6.3 * [deepstream-retinaface](deepstream-retinaface/README.md) * RetinaFace bbox- and landmark detector * Uses a custom parser called [NvDsInferParseCustomRetinaface](src/retinaface_parser/nvdsparse_retinaface.cpp) @@ -172,7 +172,7 @@ After this you can create the docker image used in the examples. ```bash cd gstreamer-examples/docker -docker build -t nvidia-deepstream-samples -f ./Dockerfile-deepstream . 
+docker build -t deepstream-6.3 -f ./Dockerfile-deepstream-6.3-triton-devel . ``` ## 4.2 Test the Docker Image @@ -202,7 +202,7 @@ docker run -i -t --rm \ -e DISPLAY=$DISPLAY \ -e XAUTHORITY=$XAUTHORITY \ -e NVIDIA_DRIVER_CAPABILITIES=all \ - --gpus all nvidia-deepstream-samples bash + --gpus all deepstream-6.3 bash ``` Then execute the following inside the container: @@ -252,9 +252,9 @@ docker run -i -t --rm \ -e DISPLAY=$DISPLAY \ -e XAUTHORITY=$XAUTHORITY \ -e NVIDIA_DRIVER_CAPABILITIES=all \ - --gpus all nvidia-deepstream-samples bash + --gpus all deepstream-6.3 bash cd /home/gstreamer-examples/deepstream-examples/deepstream-tracking -python3 gst-tracking.py -i /opt/nvidia/deepstream/deepstream-6.1/samples/streams/sample_1080p_h264.mp4 +python3 gst-tracking.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ``` When starting the Docker container with the above command, the switch `-v $(pwd):/home/gstreamer-examples` maps the local directory `$(pwd)` @@ -302,7 +302,6 @@ pip3 install pyds-1.1.4-py3-none-linux_x86_64.whl Replace `pyds-1.1.4-py3-none-linux_x86_64.whl` with the version that you downloaded. - ## 5.3 Install Triton Inference Server Before executing those examples that use Triton, you first need to install it locally. First install the following package(s): @@ -372,7 +371,6 @@ cd /opt/nvidia/deepstream/deepstream/samples ./prepare_ds_triton_model_repo.sh ``` - ## 5.6 Testing Triton Installation Test that the `nvinferenceserver` plugin can be found diff --git a/deepstream-examples/deepstream-tracking-parallel/README.md b/deepstream-examples/deepstream-tracking-parallel/README.md index 2fe31bf..c30d73b 100644 --- a/deepstream-examples/deepstream-tracking-parallel/README.md +++ b/deepstream-examples/deepstream-tracking-parallel/README.md @@ -1,4 +1,4 @@ -# Deepstream Tracking +# 1 Deepstream Parallel Tracking This example shows to split an input stream into two, using a tee-element, so that two different image processing pipelines can process the same stream. This example processes the split streams using the same inference elements, but they can be different for each stream. It appears that you need to add an @@ -9,15 +9,15 @@ nvstreammux-element into both of the processing streams, after the tee-element, * PGIE_CLASS_ID_PERSON = 2 * PGIE_CLASS_ID_ROADSIGN = 3 -# Pipeline +# 2 Pipeline Pipeline description. ![Image of the pipeline](./gst-tracking-parallel.pdf) -# Processing Pipeline Configurations +# 3 Processing Pipeline Configurations -## Pipeline 1 +## 3.1 Pipeline 1 Configuration files for the inference- and tracker elements: @@ -32,7 +32,7 @@ Configuration files for the inference- and tracker elements: * Tracker * Configuration file: [tracker_config_1.txt](tracker_config_1.txt) -## Pipeline 2 +## 3.2 Pipeline 2 Configuration files for the inference- and tracker elements: @@ -47,7 +47,7 @@ Configuration files for the inference- and tracker elements: * Tracker * Configuration file: [tracker_config_2.txt](tracker_config_2.txt) -## Requirements +## 3.3 Requirements * DeepStreamSDK 6.1.1 * Python 3.8 @@ -57,7 +57,7 @@ Configuration files for the inference- and tracker elements: * gstreamer1.0-plugins-bad * gstreamer1.0-plugins-ugly -## How to Run the Example +## 3.4 How to Run the Example In order to get help regarding input parameters, execute the following: @@ -84,3 +84,75 @@ The dot file can be converted into pdf as follows: dot -Tpdf -o output.pdf ``` +# 4 Test Pipelines + +Following are test pipelines that can be launched with `gst-launch-1.0`. 
Requirements for running the pipelines:
+
+* Deepstream 6.3
+
+## 4.1 Processing Several Streams Using a Single Pipeline
+
+Figure 1 shows the pipeline. We connect several streams to a single processing pipeline. I have omitted some details
+from the pipeline so that it fits better on the screen.
+
+<div align="center">
+<img src="figures/multi_input_pipeline.png" alt="Multiple input streams processed by a single pipeline">
+<br>
+Figure 1. Parallel processing of several streams with a single pipeline.
+</div>
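+
+Before the larger examples below, it may help to see the same pattern reduced to a single input stream. The following
+is a minimal sketch, assuming Deepstream 6.3 with the sample streams installed and the configuration files from this
+directory:
+
+```bash
+gst-launch-1.0 \
+nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_0 \
+nvstreammux name=m width=1280 height=720 batch-size=1 ! nvinfer config-file-path=dstest2_pgie_config.txt batch-size=1 ! queue ! \
+nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf_uniqueid.yml \
+ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! queue ! \
+nvdsosd ! nvvideoconvert ! nveglglessink
+```
+
+Adding inputs is then a matter of requesting more `m.sink_%u` pads on `nvstreammux` and raising the `batch-size`
+values to match, as done in the pipelines below.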
+ +### 4.1.1 Processing Pipeline with 4 Input Streams with Video- and Filesinks + +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_1 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_2 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_3 \ +nvstreammux name=m width=1280 height=720 batch-size=4 ! nvinfer config-file-path=dstest2_pgie_config.txt batch-size=4 \ +model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel_b4_gpu0_int8.engine \ +! queue ! \ +nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf_uniqueid.yml \ +ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! queue ! \ +nvmultistreamtiler rows=2 columns=2 width=1280 height=720 ! queue ! \ +nvdsosd ! tee name=t \ +t. ! queue ! nvvideoconvert ! 'video/x-raw(memory:NVMM), format=NV12' ! nvv4l2h264enc profile=High bitrate=10000000 ! h264parse ! matroskamux ! \ +filesink location=4_stream_output.mkv \ +t. ! queue ! nvvideoconvert ! nveglglessink +``` + +### 4.1.2 Processing Pipeline with 20 Input Streams with Video- and Filesinks + +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_1 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_2 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_3 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_4 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_5 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_6 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_7 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_8 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_9 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_10 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_11 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_12 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_13 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_14 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_15 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! 
m.sink_16 \
+nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_17 \
+nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_18 \
+nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_19 \
+nvstreammux name=m width=1280 height=720 batch-size=20 ! nvinfer config-file-path=dstest2_pgie_config.txt \
+batch-size=30 \
+model-engine-file=/opt/nvidia/deepstream/deepstream/samples/models/Primary_Detector/resnet10.caffemodel_b30_gpu0_int8.engine ! queue ! \
+nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf_uniqueid.yml \
+ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! queue ! \
+nvmultistreamtiler rows=5 columns=4 width=1280 height=720 ! queue ! \
+nvdsosd ! tee name=t \
+t. ! queue ! nvvideoconvert ! 'video/x-raw(memory:NVMM), format=NV12' ! nvv4l2h264enc profile=High bitrate=10000000 ! h264parse ! matroskamux ! \
+filesink location=20_stream_output.mkv \
+t. ! queue ! nvvideoconvert ! nveglglessink
+```
+
diff --git a/deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.drawio b/deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.drawio
new file mode 100644
index 0000000..82938fb
--- /dev/null
+++ b/deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.drawio
@@ -0,0 +1,166 @@
[166 lines of draw.io XML for the Figure 1 pipeline diagram omitted; the markup was lost in extraction]
diff --git a/deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.png b/deepstream-examples/deepstream-tracking-parallel/figures/multi_input_pipeline.png
new file mode 100644
index 0000000000000000000000000000000000000000..bc2939e9a1e0315b90720136410cd6587d60b6af
GIT binary patch
[binary PNG literal, 70563 bytes, omitted]
zmW9U6yo(S065$nk4xU~0CjfnYsmF4;&_44DfNtGgdW4=nd@FgHCbTtZ3hqNPTW{(@ zZ?)H+=qelbf2Q0&wguhrz9mjl02P~;W$+emL~-t+st+E{X@6d}sa|_;=4rLfSyRVw z{YNOZ&83gi@-MH}1$HHDgBCwc9>?lhq}28gm(o6!FfkJ2!D79}mAOI7HuxSdiz3CP znf1d8xPT1wED_aNeCtk{;qJSI$M_RL#%atKg~N_#`Ps|bwbGw&O~3N zor38R!!ctADZQ36#>W_7CkuC$6+HadTY~JD6V{zr7MM77_pIQC zZGyS?(i|3jFsY4ZQ^-qXui}T6jrSBNUoMp(Q3Qqu4ZRTDwu5rOaD7mv^P|{&_qT<( z0DGbbW~6od`zW2{Q$$&Gy)lJ9NCf0U{@j<+5AAT*9A$lWQs7#Rji9$-Qo&n+?!_8~ z@|-b+h*3Rm`GR{L{1*XQVLSQAnaG~S_|N%47&!s44rIK9SzavcinZ72?A## z9dCKqRJ0*wcuGut(3p}QZ=d^l@ATECxP0K~Digx2bvU<1#A&Ikc>Bs7d9J(oq2F}+320> zFq}Qv8M(U7fN)U@U_l-2SBJ9rx3a^m!Y)CE^c=SU|2e4n*v*Sg{2YLA+`q+LX>9@`U6LMdyJ3by*Hle2;enOy%#}51<_ACI*-@q znG&%tBLc$$KCAo$oz)l|A&jD#E{Ww99sCM{;%RfZB zAqN9P$e6&lgqi9)YHAxnTVvn0CX49o?K4EUZN7>-G$hUnZQ@u5b;+v5Ao=nx@=}_1! zE?*vpCm;!l$5WHARzWPkNCWta@*EfYKs^EAb!v z!@k4dIsbFEU#t%07jlAzZhS3J@UH5DmI(GN!>^*{nqO9u4c@+MXCC+pUbLSsoY)e4 z&76-eVi{%u%BE1s$+OYEpq)r$e3g6n?-EuQhs;av+p_drOJ(LJMPy|)TsqvIGWzgG zJM?&pSTyqp>7t>2kPlMc-~I1AYAHguT%%jAxyq~Yr!*=oel;nf+(}`>x{XM4#D`U8*wF$JCQ!6E)$YoG|>YO2J`XtJ-9>IW{@-`P2C)qK348LEW4uoyAk z3=FS>?y8(-<*dBUJxszXzFr-RT?ptL8ZHGF0O9Y!K%xrdrLrTOR zCHzyqkG{J^e&Y6Dc1y>gZ1IHlb7iZFFlx-AvFcxX`iB>#{A~N&st845R)dTuv49^9 MHEmVmX{7)E0S2Xjv;Y7A literal 0 HcmV?d00001 diff --git a/deepstream-examples/deepstream-tracking/README.md b/deepstream-examples/deepstream-tracking/README.md index db960a1..a197f0b 100644 --- a/deepstream-examples/deepstream-tracking/README.md +++ b/deepstream-examples/deepstream-tracking/README.md @@ -1,7 +1,7 @@ -# Deepstream Tracking +# 1 Deepstream Tracking This example re-implements the example from https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/blob/master/apps/deepstream-test2/deepstream_test_2.py, hopefully -in a bit more readable format. The example detects and tracks following objects seen in a h264 encoded video stream: +in a bit more readable format. The example detects and tracks following objects seen in a video stream: * PGIE_CLASS_ID_VEHICLE = 0 * PGIE_CLASS_ID_BICYCLE = 1 @@ -21,7 +21,7 @@ Following inference and tracker components are used: * Tracker * Configuration file: [dstest2_tracker_config.txt](dstest2_tracker_config.txt) -## Versions +## 1.1 Versions There are two versions: * [gst-tracking.py](gst-tracking.py) @@ -30,7 +30,7 @@ There are two versions: * This version draws the information so that bounding- and text boxes for smaller objects are drawn first. Everything else being the same, smaller objects tend to be further away from the camera. Also bounding bbox colors are different for each object type. -## Requirements +## 1.2 Requirements * DeepStreamSDK 6.1.1 * Python 3.8 @@ -40,7 +40,7 @@ There are two versions: * gstreamer1.0-plugins-bad * gstreamer1.0-plugins-ugly -## How to Run the Example +## 1.3 How to Run the Example In order to get help regarding input parameters, execute the following: @@ -60,3 +60,32 @@ If you have DeepStream with samples installed, you can execute the following: python3 gst-tracking-v2.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ``` +## 1.4 Test Pipelines + +Following are test pipelines that can be launched with `gst-launch-1.0`. Requirements for running the pipelines: + +* Deepstream 6.3 + +**Processing pipeline with videosink** + +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! \ +m.sink_0 nvstreammux name=m width=1280 height=720 batch-size=1 ! nvinfer config-file-path=dstest2_pgie_config.txt ! 
\ +nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf.yml \ +ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! nvdsosd display-clock=1 ! \ +nvvideoconvert ! nveglglessink +``` + +**Processing pipeline with video- and filesinks** + +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! \ +m.sink_0 nvstreammux name=m width=1280 height=720 batch-size=1 ! nvinfer config-file-path=dstest2_pgie_config.txt ! \ +nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf.yml \ +ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! nvdsosd display-clock=1 ! \ +tee name=t t. ! queue ! nvvideoconvert ! 'video/x-raw(memory:NVMM), format=NV12' ! nvv4l2h264enc ! h264parse ! matroskamux ! \ +filesink location=output.mkv t. ! queue ! nvvideoconvert ! nveglglessink +``` + diff --git a/deepstream-examples/deepstream-tracking/config_tracker_NvDCF_perf_uniqueid.yml b/deepstream-examples/deepstream-tracking/config_tracker_NvDCF_perf_uniqueid.yml new file mode 100644 index 0000000..6868694 --- /dev/null +++ b/deepstream-examples/deepstream-tracking/config_tracker_NvDCF_perf_uniqueid.yml @@ -0,0 +1,74 @@ +%YAML:1.0 +################################################################################ +# SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +################################################################################ + +BaseConfig: + minDetectorConfidence: 0.5 # If the confidence of a detector bbox is lower than this, then it won't be considered for tracking + +TargetManagement: + enableBboxUnClipping: 1 # In case the bbox is likely to be clipped by image border, unclip bbox + maxTargetsPerStream: 150 # Max number of targets to track per stream. Recommended to set >10. Note: this value should account for the targets being tracked in shadow mode as well. Max value depends on the GPU memory capacity + + # [Creation & Termination Policy] + minIouDiff4NewTarget: 0.5 # If the IOU between the newly detected object and any of the existing targets is higher than this threshold, this newly detected object will be discarded. + minTrackerConfidence: 0.2 # If the confidence of an object tracker is lower than this on the fly, then it will be tracked in shadow mode. Valid Range: [0.0, 1.0] + probationAge: 5 # If the target's age exceeds this, the target will be considered to be valid. + maxShadowTrackingAge: 30 # Max length of shadow tracking. If the shadowTrackingAge exceeds this limit, the tracker will be terminated. + earlyTerminationAge: 1 # If the shadowTrackingAge reaches this threshold while in TENTATIVE period, the target will be terminated prematurely. + +TrajectoryManagement: + useUniqueID: 1 # Use 64-bit long Unique ID when assignining tracker ID. 
Default is [true] + +DataAssociator: + dataAssociatorType: 0 # the type of data associator among { DEFAULT= 0 } + associationMatcherType: 0 # the type of matching algorithm among { GREEDY=0, GLOBAL=1 } + checkClassMatch: 1 # If checked, only the same-class objects are associated with each other. Default: true + + # [Association Metric: Thresholds for valid candidates] + minMatchingScore4Overall: 0.0 # Min total score + minMatchingScore4SizeSimilarity: 0.6 # Min bbox size similarity score + minMatchingScore4Iou: 0.0 # Min IOU score + minMatchingScore4VisualSimilarity: 0.7 # Min visual similarity score + + # [Association Metric: Weights] + matchingScoreWeight4VisualSimilarity: 0.6 # Weight for the visual similarity (in terms of correlation response ratio) + matchingScoreWeight4SizeSimilarity: 0.0 # Weight for the Size-similarity score + matchingScoreWeight4Iou: 0.4 # Weight for the IOU score + +StateEstimator: + stateEstimatorType: 1 # the type of state estimator among { DUMMY=0, SIMPLE=1, REGULAR=2 } + + # [Dynamics Modeling] + processNoiseVar4Loc: 2.0 # Process noise variance for bbox center + processNoiseVar4Size: 1.0 # Process noise variance for bbox size + processNoiseVar4Vel: 0.1 # Process noise variance for velocity + measurementNoiseVar4Detector: 4.0 # Measurement noise variance for detector's detection + measurementNoiseVar4Tracker: 16.0 # Measurement noise variance for tracker's localization + +VisualTracker: + visualTrackerType: 1 # the type of visual tracker among { DUMMY=0, NvDCF=1 } + + # [NvDCF: Feature Extraction] + useColorNames: 1 # Use ColorNames feature + useHog: 1 # Use Histogram-of-Oriented-Gradient (HOG) feature + featureImgSizeLevel: 2 # Size of a feature image. Valid range: {1, 2, 3, 4, 5}, from the smallest to the largest + featureFocusOffsetFactor_y: -0.2 # The offset for the center of hanning window relative to the feature height. The center of hanning window would move by (featureFocusOffsetFactor_y*featureMatSize.height) in vertical direction + + # [NvDCF: Correlation Filter] + filterLr: 0.075 # learning rate for DCF filter in exponential moving average. Valid Range: [0.0, 1.0] + filterChannelWeightsLr: 0.1 # learning rate for the channel weights among feature channels. Valid Range: [0.0, 1.0] + gaussianSigma: 0.75 # Standard deviation for Gaussian for desired response when creating DCF filter [pixels] diff --git a/deepstream-examples/deepstream-tracking/dstest2_tracker_config.txt b/deepstream-examples/deepstream-tracking/dstest2_tracker_config.txt index d56ab39..5395225 100644 --- a/deepstream-examples/deepstream-tracking/dstest2_tracker_config.txt +++ b/deepstream-examples/deepstream-tracking/dstest2_tracker_config.txt @@ -28,5 +28,5 @@ tracker-height=384 gpu-id=0 ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ll-config-file=config_tracker_NvDCF_perf.yml -enable-past-frame=1 -enable-batch-process=1 +#enable-past-frame=1 +#enable-batch-process=1 diff --git a/deepstream-examples/deepstream-tracking/gst-tracking-v2.py b/deepstream-examples/deepstream-tracking/gst-tracking-v2.py index 573b6cc..0603a3c 100644 --- a/deepstream-examples/deepstream-tracking/gst-tracking-v2.py +++ b/deepstream-examples/deepstream-tracking/gst-tracking-v2.py @@ -1,6 +1,6 @@ """ This file re-implements deepstream (Python) example deepstream-test2.py, hopefully in a cleaner manner. 
In essence
-the example reads a h264 encoded video stream from a file, like mp4, and tracks objects like:
+the example processes videos or images and detects and tracks the following objects:
 
 PGIE_CLASS_ID_VEHICLE = 0
 PGIE_CLASS_ID_BICYCLE = 1
@@ -18,19 +18,23 @@
 
 In order to process a file:
 
-python3 gst-tracking-v2.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
+python3 gst-tracking-v2.py -u file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
 """
 
 from collections import namedtuple
 from operator import attrgetter
 import argparse
 import configparser
-import os
 import sys
 import signal
 import pyds
 from helpers import gsthelpers
 import gi
+import logging
+import platform
+from typing import Any
+
+logger = logging.getLogger(__name__)
 
 gi.require_version("Gst", "1.0")
 from gi.repository import Gst, GLib  # noqa: E402
@@ -55,7 +59,9 @@
 }
 
 
-def osd_sink_pad_buffer_probe(pad, info, u_data):
+def osd_sink_pad_buffer_probe(
+    pad: Gst.Pad, info: Gst.PadProbeInfo, u_data: Any
+) -> Gst.PadProbeReturn:
     frame_number = 0
     obj_counter = {
         PGIE_CLASS_ID_VEHICLE: 0,
@@ -244,10 +250,17 @@
 
 class Player(object):
     """
-    A simple Player-class that processes files with h264 encoded video content.
+    A simple Player-class that processes streams based on a given URI.
     """
 
-    def __init__(self):
+    def __init__(self, output_file: str = ""):
+        """Player constructor.
+
+        Parameters
+        ----------
+        output_file : str, optional
+            Output file path. If empty, no video is created, by default ""
+        """
         # Initialize gst
         Gst.init(None)
 
@@ -265,11 +278,8 @@
         assert self.pipeline is not None
 
         # Create all the elements
-        self.source = gsthelpers.create_element("filesrc", "source")
-        self.demuxer = gsthelpers.create_element("qtdemux", "demuxer")
+        self.urisrcbin = gsthelpers.create_element("nvurisrcbin", "urisrcbin")
         self.video_queue = gsthelpers.create_element("queue", "video-queue")
-        self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser")
-        self.h264_decoder = gsthelpers.create_element("nvv4l2decoder", "h264-decoder")
         self.stream_muxer = gsthelpers.create_element("nvstreammux", "stream-muxer")
         self.primary_inference = gsthelpers.create_element(
             "nvinfer", "primary-inference"
@@ -293,28 +303,27 @@
         self.videosink_queue = gsthelpers.create_element("queue", "videosink-queue")
         self.video_sink = gsthelpers.create_element("nveglglessink", "nvvideo-renderer")
         # File sink branch
-        self.filesink_queue = gsthelpers.create_element("queue", "filesink-queue")
-        self.file_sink_converter = gsthelpers.create_element(
-            "nvvideoconvert", "file-sink-videoconverter"
-        )
-        self.file_sink_encoder = gsthelpers.create_element(
-            "nvv4l2h264enc", "file-sink-encoder"
-        )
-        self.file_sink_parser = gsthelpers.create_element(
-            "h264parse", "file-sink-parser"
-        )
-        self.file_sink_muxer = gsthelpers.create_element(
-            "matroskamux", "file-sink-muxer"
-        )
-        self.file_sink = gsthelpers.create_element("filesink", "file-sink")
+        if output_file != "":
+            self.filesink_queue = gsthelpers.create_element("queue", "filesink-queue")
+            self.file_sink_converter = gsthelpers.create_element(
+                "nvvideoconvert", "file-sink-videoconverter"
+            )
+            self.caps_filter = gsthelpers.create_element("capsfilter", "capsfilter")
+            self.file_sink_encoder = gsthelpers.create_element(
+                "nvv4l2h264enc", "file-sink-encoder"
+            )
+            self.file_sink_parser = gsthelpers.create_element(
+                "h264parse", "file-sink-parser"
+            )
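+            # Mux the parsed H.264 stream into a Matroska (.mkv) container, which filesink writes to disk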
self.file_sink_muxer = gsthelpers.create_element( + "matroskamux", "file-sink-muxer" + ) + self.file_sink = gsthelpers.create_element("filesink", "file-sink") # Add elements to the pipeline - self.pipeline.add(self.source) - self.pipeline.add(self.demuxer) - self.pipeline.add(self.video_queue) - self.pipeline.add(self.h264_parser) - self.pipeline.add(self.h264_decoder) + self.pipeline.add(self.urisrcbin) self.pipeline.add(self.stream_muxer) + self.pipeline.add(self.video_queue) self.pipeline.add(self.primary_inference) self.pipeline.add(self.tracker) self.pipeline.add(self.secondary1_inference) @@ -327,21 +336,32 @@ def __init__(self): self.pipeline.add(self.videosink_queue) self.pipeline.add(self.video_sink) # File sink branch - self.pipeline.add(self.filesink_queue) - self.pipeline.add(self.file_sink_converter) - self.pipeline.add(self.file_sink_encoder) - self.pipeline.add(self.file_sink_parser) - self.pipeline.add(self.file_sink_muxer) - self.pipeline.add(self.file_sink) + if output_file != "": + self.pipeline.add(self.filesink_queue) + self.pipeline.add(self.file_sink_converter) + self.pipeline.add(self.caps_filter) + self.pipeline.add(self.file_sink_encoder) + self.pipeline.add(self.file_sink_parser) + self.pipeline.add(self.file_sink_muxer) + self.pipeline.add(self.file_sink) + # Set properties for file_sink_encoder + self.file_sink_encoder.set_property("profile", 4) + # Set properties for the caps filter + self.caps_filter.set_property( + "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM),format=NV12") + ) + + # Set properties for the nvusrsrcbin + # self.urisrcbin.set_property("cudadec-memtype", 2) # Set properties for the streammux self.stream_muxer.set_property("width", 1920) self.stream_muxer.set_property("height", 1080) self.stream_muxer.set_property("batch-size", 1) self.stream_muxer.set_property("batched-push-timeout", 4000000) - - # Set properties for file_sink_encoder - self.file_sink_encoder.set_property("profile", 4) + self.stream_muxer.set_property("attach-sys-ts", True) + self.stream_muxer.set_property("enable-padding", True) + # self.stream_muxer.set_property("live-source", 1) # Set properties for the inference engines self.primary_inference.set_property( @@ -357,72 +377,35 @@ def __init__(self): "config-file-path", "dstest2_sgie3_config.txt" ) - # Set properties for the tracker + # Configure tracker tracker_config = configparser.ConfigParser() tracker_config.read("dstest2_tracker_config.txt") - tracker_config.sections() - for key in tracker_config["tracker"]: - if key == "tracker-width": - tracker_width = tracker_config.getint("tracker", key) - self.tracker.set_property("tracker-width", tracker_width) - if key == "tracker-height": - tracker_height = tracker_config.getint("tracker", key) - self.tracker.set_property("tracker-height", tracker_height) - if key == "gpu-id": - tracker_gpu_id = tracker_config.getint("tracker", key) - self.tracker.set_property("gpu_id", tracker_gpu_id) - if key == "ll-lib-file": - tracker_ll_lib_file = tracker_config.get("tracker", key) - self.tracker.set_property("ll-lib-file", tracker_ll_lib_file) - if key == "ll-config-file": - tracker_ll_config_file = tracker_config.get("tracker", key) - self.tracker.set_property("ll-config-file", tracker_ll_config_file) - if key == "enable-batch-process": - tracker_enable_batch_process = tracker_config.getint("tracker", key) - self.tracker.set_property( - "enable_batch_process", tracker_enable_batch_process - ) - if key == "enable-past-frame": - tracker_enable_past_frame = 
tracker_config.getint("tracker", key) - self.tracker.set_property( - "enable_past_frame", tracker_enable_past_frame - ) + value = tracker_config["tracker"][key] + if value.isdigit(): + value = int(value) + self.tracker.set_property(key, value) + + # Set video sink properties + self.video_sink.set_property("sync", 1) # --- LINK IMAGE PROCESSING --- # Link video input and inference as follows: # - # filesrc -> demux -> queue -> h264parser -> h264decoder -> streammux -> - # primary_inference1 -> tracker -> secondary_inference1 -> secondary_inference2 -> secondary_inference3 -> - # videoconverter -> osd (bounding boxes) -> tee + # urisrcbin -> streammux -> video_queue -> primary_inference1 -> tracker + # -> secondary_inference1 -> secondary_inference2 -> secondary_inference3 + # -> videoconverter -> osd (bounding boxes) -> tee # # After the tee element we have two output branches that are described later. - # Link source to demuxer - gsthelpers.link_elements([self.source, self.demuxer]) - - # Connect demux to the pad-added signal, used to link demuxer to queue dynamically - demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() - demuxer_pad_added.register("video_", self.video_queue, "sink") - - assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None - - # Link video pipeline - gsthelpers.link_elements( - [self.video_queue, self.h264_parser, self.h264_decoder] - ) - - # Link decoder to streammux - source = self.h264_decoder.get_static_pad("src") - assert source is not None - sink = self.stream_muxer.get_request_pad("sink_0") - assert sink is not None - assert source.link(sink) == Gst.PadLinkReturn.OK + # Connect urisrcbin to the pad-added signal, used for linking urisrcbin to streammux dynamically + self.urisrcbin.connect("pad-added", self.on_pad_added, "vsrc") # Link inference, tracker and visualization gsthelpers.link_elements( [ self.stream_muxer, + self.video_queue, self.primary_inference, self.tracker, self.secondary1_inference, @@ -448,32 +431,44 @@ def __init__(self): assert sink is not None assert src.link(sink) == Gst.PadLinkReturn.OK - # Link video_queue to video_sink - gsthelpers.link_elements([self.videosink_queue, self.video_sink]) + # If Jetson + if platform.machine() == "aarch64": + self.video_sink_transform = gsthelpers.create_element( + "nvegltransform", "video-sink-transform" + ) + self.pipeline.add(self.video_sink_transform) + gsthelpers.link_elements( + [self.videosink_queue, self.video_sink_transform, self.video_sink] + ) + # Non-jetson + else: + gsthelpers.link_elements([self.videosink_queue, self.video_sink]) # --- File-sink output branch --- - src = self.tee.get_request_pad("src_1") - assert src is not None - sink = self.filesink_queue.get_static_pad("sink") - assert sink is not None - assert src.link(sink) == Gst.PadLinkReturn.OK - - gsthelpers.link_elements( - [ - self.filesink_queue, - self.file_sink_converter, - self.file_sink_encoder, - self.file_sink_parser, - ] - ) + if output_file != "": + src = self.tee.get_request_pad("src_1") + assert src is not None + sink = self.filesink_queue.get_static_pad("sink") + assert sink is not None + assert src.link(sink) == Gst.PadLinkReturn.OK + + gsthelpers.link_elements( + [ + self.filesink_queue, + self.file_sink_converter, + self.caps_filter, + self.file_sink_encoder, + self.file_sink_parser, + ] + ) - src = self.file_sink_parser.get_static_pad("src") - assert src is not None - sink = self.file_sink_muxer.get_request_pad("video_0") - assert sink is not None - assert src.link(sink) == Gst.PadLinkReturn.OK + src 
= self.file_sink_parser.get_static_pad("src") + assert src is not None + sink = self.file_sink_muxer.get_request_pad("video_0") + assert sink is not None + assert src.link(sink) == Gst.PadLinkReturn.OK - gsthelpers.link_elements([self.file_sink_muxer, self.file_sink]) + gsthelpers.link_elements([self.file_sink_muxer, self.file_sink]) # --- Meta-data output --- # Add a probe to the sink pad of the osd-element in order to draw/print meta-data to the canvas @@ -481,65 +476,141 @@ def __init__(self): assert osdsinkpad is not None osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) - def play(self, input_file: str, output_file: str): + def on_pad_added(self, src: Gst.Element, new_pad: Gst.Pad, user_data: str): """ - - :param input_file: path to the h264 encoded input file - :param output_file: path to the h264 encoded output file - :return: + Handles the addition of a new pad to the source element. + + This method is called whenever a new pad is added to the source + element. It verifies if the new pad matches the user-specified criteria + and links it to a sink pad on the streammuxer if applicable. + + Parameters + ---------- + src : Gst.Element + The source element that emitted the pad-added signal. + new_pad : Gst.Pad + The newly added pad from the source element. + user_data : str + The string prefix used to match the name of the new pad. + + Raises + ------ + RuntimeError + If a sink pad cannot be obtained from the streammuxer. + RuntimeError + If linking the new pad to the sink pad fails. + + Notes + ----- + - The method logs the process of receiving and linking the new pad. + - Only pads whose names start with `user_data` are processed and linked. + + Returns + ------- + None + This function does not return a value. """ - print(f"PLAY(input_file={input_file}, output_file={output_file})") + logger.info(f"Received new pad '{new_pad.get_name()}' from '{src.get_name()}'") + + # Check that the new_pad name starts with the name given by the user + if new_pad.get_name().startswith(user_data): + + # Request a sink pad from the streammuxer + sink_pad = self.stream_muxer.get_request_pad("sink_0") + + if not sink_pad: + raise RuntimeError("Could not get a sink pad from the streammuxer") + + # Link the pad + if new_pad.link(sink_pad) != Gst.PadLinkReturn.OK: + raise RuntimeError( + f"Failed to link {new_pad.get_name()} to {sink_pad.get_name()}" + ) + else: + logger.info( + f"Connected '{new_pad.get_name()}' to '{sink_pad.get_name()}'" + ) + + def play(self, uri: str, output_file: str = ""): + """Starts the pipeline. 
+ + Parameters + ---------- + uri : str + URI of the file or rtsp source + output_file : str, optional + path to the h264 encoded output file, by default "" + """ - # Check if the file exists - if not os.path.exists(input_file): - raise RuntimeError(f"Input file '{input_file}' does not exist") + logger.info(f"PLAY(uri={uri}, output_file={output_file})") # Set source location property to the file location - self.source.set_property("location", input_file) + self.urisrcbin.set_property("uri", uri) # Set location for the output file - self.file_sink.set_property("location", output_file) + if output_file != "": + self.file_sink.set_property("location", output_file) # Create a bus and add signal watcher bus = self.pipeline.get_bus() bus.add_signal_watch() bus.connect("message", self.on_message) - print("Setting pipeline state to PLAYING...", end="") + logger.info("Setting pipeline state to PLAYING") if self.pipeline.set_state(Gst.State.PLAYING) == Gst.StateChangeReturn.FAILURE: - print("failed") + logger.error("Failed to set the pipeline to state PLAYING") else: - print("done") + logger.info("Pipeline set to state PLAYING") # Start loop self.loop.run() def stop(self): - print("STOP()") - print("Setting pipeline state to NULL...", end="") - self.pipeline.set_state(Gst.State.NULL) - print("done") + logger.info("Stopping the pipeline") + if self.pipeline: + self.pipeline.set_state(Gst.State.NULL) self.loop.quit() - def on_message(self, bus, message): - """ - Message handler function. + def on_message(self, bus: Gst.Bus, message: Gst.Message) -> None: + """Message handler function - :param bus: bus - :param message: message - :return: nothing + Parameters + ---------- + bus : Gst.Bus + Gst bus + message : Gst.Message + Gst message + + Returns + ------- + None """ + message_type = message.type if message_type == Gst.MessageType.EOS: - print("EOS message type received") + logger.info("EOS message type received") self.stop() elif message_type == Gst.MessageType.ERROR: - err, dbg = message.parse_error() - print(f"Error from {message.src.get_name()}: {err.message}") + err, debug = message.parse_error() + logger.info(f"Error from {message.src.get_name()}: {err.message}, {debug}") self.stop() + # State changed + elif message_type == Gst.MessageType.STATE_CHANGED: + old_state, new_state, pending = message.parse_state_changed() + + src = message.src + if isinstance(src, Gst.Pipeline): + element_name = "Player" + else: + element_name = src.get_name() + + logging.info( + f"Player, {element_name} state changed: {old_state.value_nick} -> {new_state.value_nick}" + ) + def stop_handler(self, sig, frame): """ Even handler for stopping the pipeline. 
@@ -548,24 +619,29 @@ def stop_handler(self, sig, frame):
         :param sig: signal
         :param frame: stack frame
         :return:
         """
-        print("Signal SIGINT received")
+        logger.info("Signal SIGINT received")
         self.stop()


-if __name__ == "__main__":
+def main():
+    logging.basicConfig(level=logging.INFO)
     argParser = argparse.ArgumentParser()
-    argParser.add_argument("-i", "--input_file", help="input file path", default="")
     argParser.add_argument(
-        "-o", "--output_file", help="output file path", default="output.mp4"
+        "-u", "--uri", help="URI of the file or rtsp source", default=""
     )
+    argParser.add_argument("-o", "--output_file", help="output file path", default="")
     args = argParser.parse_args()

-    player = Player()
+    player = Player(output_file=args.output_file)

     try:
-        player.play(args.input_file, args.output_file)
+        player.play(args.uri, args.output_file)
     except Exception as e:
         print(e)
         player.stop()
         sys.exit(-1)

     sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/deepstream-examples/deepstream-triton-tracking/README.md b/deepstream-examples/deepstream-triton-tracking/README.md
index ea717f5..e2d6468 100644
--- a/deepstream-examples/deepstream-triton-tracking/README.md
+++ b/deepstream-examples/deepstream-triton-tracking/README.md
@@ -1,4 +1,4 @@
-# Deepstream Tracking with Triton Inferenfce Server
+# 1 Deepstream Tracking with Triton Inference Server

 This directory contains several different implementations related to using Triton Inference Server
 for inference in a Deepstream pipeline. The programs detect the following objects:
@@ -17,7 +17,7 @@ Following inference and tracker components are used:
 * Tracker
   * Configuration file: [dstest2_tracker_config.txt](dstest2_tracker_config.txt)

-## Versions
+## 1.1 Versions

 * [gst-triton-tracking.py](gst-triton-tracking.py)
   * This version draws bounding box and object information using slightly modified version of the original function.
@@ -42,18 +42,18 @@ Following inference and tracker components are used:
   * Uses tiling
   * Input videos are expected to be h264 encoded

-## Observations
+## 1.2 Observations

-### DeepstreamSDK 6.1.1
+### 1.2.1 DeepstreamSDK 6.1.1

 When using the `nvv4l2h264enc` encoder in the file-sink branch the pipeline became unresponsive after having
 processed some frames. It seems to work with `x264enc` without any problems.

-### DeepstreamSDK 6.3
+### 1.2.2 DeepstreamSDK 6.3

 The Gst plug-in `x264enc` apparently has been removed.

-## Requirements
+## 1.3 Requirements

 * DeepStreamSDK 6.1.1 or 6.3
 * Nvidia Container toolkit and Docker compose (if using Docker)
@@ -65,13 +65,13 @@ The Gst plug-in `x264enc` apparently has been removed.
   * gstreamer1.0-plugins-ugly
   * Triton Inference Server (locally built or Docker image)

-## How to Run the Example Locally
+## 1.4 How to Run the Example Locally

 Since the `gst-triton-tracking-v2.py` uses configuration files from `/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton-grpc`,
 the expectation is that the Triton server is running in the same machine as where the code `gst-triton-tracking-v2.py`
 is run from. If this is not the case, then you need to modify the IP-address of the Triton server in the configuration files.
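+
+The gRPC address lives inside each `nvinferserver` configuration file. As a rough sketch of where to
+look (assuming the protobuf-text layout used by the DeepStream sample configs and Triton's default
+gRPC port 8001; field names can vary between DeepStream versions, so check your own
+`config_infer_*.txt` files), the section to edit looks roughly like this:
+
+```
+infer_config {
+  backend {
+    triton {
+      model_name: "Primary_Detector"
+      version: -1
+      grpc {
+        # Replace localhost with the IP-address of the machine running Triton
+        url: "localhost:8001"
+      }
+    }
+  }
+}
+```
+
+The same `grpc { url: ... }` block appears in the secondary classifier configs as well, so each file
+needs the same edit when Triton runs on another host.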
-### Building Models +### 1.4.1 Building Models The first step is to build the TensorRT models: @@ -87,7 +87,7 @@ tritonserver \ --model-repository=/opt/nvidia/deepstream/deepstream/samples/triton_model_repo ``` -### Running the Tracking Example +### 1.4.2 Running the Tracking Example To get help regarding input parameters, execute: @@ -107,7 +107,7 @@ If you have DeepStream with samples installed, you can execute the following: python3 gst-triton-tracking-v2.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ``` -## How to Run the Example Using Docker +## 1.5 How to Run the Example Using Docker Here the expectation is that both the Docker container running the Triton server and the container where the `gst-triton-tracking-v2.py` code is executed from, are running in the same host. We need to modify @@ -119,7 +119,7 @@ cd gstreamer-examples/docker docker build -t deepstream-6.3 -f ./Dockerfile-deepstream-6.3-triton-devel . ``` -### Launch Triton Server Using Docker Compose +### 1.5.1 Launch Triton Server Using Docker Compose Launch Triton server using Docker compose as follows: @@ -151,7 +151,7 @@ If Triton is running correctly, you should get an answer similar to: * Connection #0 to host localhost left intact ``` -### Launch gst-triton-tracking-v2.py +### 1.5.2 Launch gst-triton-tracking-v2.py Next we launch the Docker container that we use for executing the tracking code. Following commands are run in the host. First we enable any client to interact with the local X server: @@ -203,7 +203,7 @@ cd /home/gstreamer-examples python3 gst-triton-tracking-v2.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ``` -### Launch gst-triton-parallel-tracking-v1.py +### 1.5.3 Launch gst-triton-parallel-tracking-v1.py We use the same Docker container where the Triton server is running. First find out the container ID: @@ -219,7 +219,7 @@ cd /home/gstreamer_examples python3 gst-triton-parallel-tracking-v1.py -n 2 -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ``` -### Launch gst-triton-parallel-tracking-v2.py +### 1.5.4 Launch gst-triton-parallel-tracking-v2.py We use the same Docker container where the Triton server is running. First find out the container ID: @@ -233,3 +233,73 @@ Use the ID that corresponds to the `deepstream-triton-tracking-triton-server` co ```bash cd /home/gstreamer_examples python3 gst-triton-parallel-tracking-v2.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 +``` +### 1.5.5 Test Pipelines + +Following are test pipelines that can be launched with `gst-launch-1.0`. + +**Single input stream** + +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! \ +m.sink_0 nvstreammux name=m width=1280 height=720 batch-size=1 ! nvinferserver config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt ! \ +nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf.yml \ +ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! nvdsosd display-clock=1 ! \ +nvvideoconvert ! nveglglessink +``` + +**4 Input streams with video- and filesinks** + +This pipeline connects 4 `nvurisrcbin` to a single image processing pipeline. 
+ +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_1 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_2 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_3 \ +nvstreammux name=m width=1280 height=720 batch-size=4 ! \ +nvinferserver config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt ! \ +nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf.yml \ +ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! \ +nvmultistreamtiler rows=2 columns=2 width=1280 height=720 ! \ +nvdsosd ! tee name=t t. ! queue ! nvvideoconvert ! 'video/x-raw(memory:NVMM), format=NV12' ! nvv4l2h264enc profile=High bitrate=10000000 ! h264parse ! matroskamux ! \ +filesink location=triton_4_stream_output.mkv t. ! queue ! nvvideoconvert ! nveglglessink +``` + +**20 Input streams with video- and filesinks** + +This pipeline connects 20 `nvurisrcbin` to a single image processing pipeline. + +```bash +gst-launch-1.0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_0 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_1 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_2 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_3 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_4 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_5 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_6 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_7 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_8 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_9 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_10 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_11 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_12 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_13 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_14 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_15 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_16 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! 
m.sink_17 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 ! queue ! m.sink_18 \ +nvurisrcbin uri=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4 ! queue ! m.sink_19 \ +nvstreammux name=m width=1280 height=720 batch-size=20 ! \ +nvinferserver config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt ! queue ! \ +nvtracker tracker-width=640 tracker-height=480 ll-config-file=config_tracker_NvDCF_perf.yml \ +ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ! queue ! \ +nvmultistreamtiler rows=5 columns=4 width=1280 height=720 ! queue ! \ +nvdsosd ! tee name=t t. ! queue ! nvvideoconvert ! 'video/x-raw(memory:NVMM), format=NV12' ! nvv4l2h264enc profile=High bitrate=10000000 ! h264parse ! matroskamux ! \ +filesink location=triton_20_stream_output.mkv t. ! queue ! nvvideoconvert ! nveglglessink +``` + diff --git a/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v1.py b/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v1.py index 5648f61..b1cfbe1 100644 --- a/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v1.py +++ b/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v1.py @@ -1,9 +1,8 @@ """ This file implements a simple pipeline with detector and classifiers, using Triton Inference Server -for doing inference. The same pipeline can be spawned n-number of times, using the same input. The -idea of this program is to do some testing using Triton. +for doing inference. Several streams can be processed using the pipeline. -This example uses a probe attached to the osd plug-in in order to modify the way how the detections +This example uses a probe attached to the osd element in order to modify the way how the detections are drawn to the video. Following objects are detected: PGIE_CLASS_ID_VEHICLE = 0 @@ -17,15 +16,18 @@ away from the camera first, object IDs and labels will be easier to read. 
 For more information regarding the input parameters, execute the following:

->> python3 gst-triton-parallel-tracking-v2.py -h
+>> python3 gst-triton-parallel-tracking-v1.py -h

 In order to process a single video file:

->> python3 gst-triton-tracking-v2.py -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
+>> python3 gst-triton-parallel-tracking-v1.py -u /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4

-In order to process the same input file in parallel:
->> python3 gst-triton-tracking-v2.py -n 4 -i /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4
+In order to process several video files:
+>> python3 gst-triton-parallel-tracking-v1.py -u \
+   /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4,/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4
 """

+import platform
+from urllib.parse import urlparse
 from collections import namedtuple
 from operator import attrgetter
 import argparse
@@ -36,9 +38,13 @@
 import pyds
 from helpers import gsthelpers
 import gi
+import logging
+from typing import Any
+
+logger = logging.getLogger(__name__)

 gi.require_version("Gst", "1.0")
-from gi.repository import Gst, GLib  # noqa: E402
+from gi.repository import Gst, GLib, GObject  # noqa: E402, F401

 PGIE_CLASS_ID_VEHICLE = 0
 PGIE_CLASS_ID_BICYCLE = 1
@@ -61,7 +67,9 @@
 }


-def osd_sink_pad_buffer_probe(pad, info, u_data):
+def osd_sink_pad_buffer_probe(
+    pad: Gst.Pad, info: Gst.PadProbeInfo, u_data: Any
+) -> Gst.PadProbeReturn:
     frame_number = 0
     obj_counter = {
         PGIE_CLASS_ID_VEHICLE: 0,
@@ -248,345 +256,332 @@ def osd_sink_pad_buffer_probe(pad, info, u_data):
     return Gst.PadProbeReturn.OK


-class Player(object):
-    """
-    A simple Player-class that processes files with h264 encoded video content.
-    """
-
-    def __init__(self):
+class MultiPlayer:
+    def __init__(self, uri_list: str):
+        """MultiPlayer constructor.
+
+        Parameters
+        ----------
+        uri_list : str
+            A comma-separated list of URIs
+        """
-        # Initialize gst
         Gst.init(None)
-
-        # Create mainloop
         self.loop = GLib.MainLoop()
+        self.bin_cntr = 0

-        # Register a signal handler for SIGINT
+        # Register signal handlers
         signal.signal(signal.SIGINT, self.stop_handler)
         signal.signal(signal.SIGTERM, self.stop_handler)
         signal.signal(signal.SIGHUP, self.stop_handler)

+        # If the uri_list contains files, check that these exist
+        for uri in uri_list.split(","):
+            uri_parsed = urlparse(uri)
+            if uri_parsed.scheme == "file":
+                if not os.path.exists(uri_parsed.path):
+                    logger.error(f"File '{uri_parsed.path}' does not exist")
+                    sys.exit(-1)
+
         # Create an empty pipeline
-        self.pipeline = Gst.Pipeline.new("video-pipeline")
+        self.pipeline = Gst.Pipeline.new("input-pipeline")
         assert self.pipeline is not None

+        # Create elements
+        nvmultiurisrcbin = gsthelpers.create_element(
+            "nvmultiurisrcbin", "multiurisrcbin"
+        )
+        demuxer = gsthelpers.create_element("nvstreamdemux", "demuxer")
+
+        # Add elements to the pipeline
+        self.pipeline.add(nvmultiurisrcbin)
+        self.pipeline.add(demuxer)
+
+        # Set the multiurisrcbin properties
+        logger.info(f"URI-list: {uri_list}")
+        nvmultiurisrcbin.set_property("uri-list", uri_list)
+        nvmultiurisrcbin.set_property("width", 1920)
+        nvmultiurisrcbin.set_property("height", 1080)
+        nvmultiurisrcbin.set_property("live-source", 1)
+
+        # Link elements
+        gsthelpers.link_elements([nvmultiurisrcbin, demuxer])
+
+        # Create the image processing pipelines, one for each stream
+        for i, el in enumerate(uri_list.split(",")):
+            logger.info(f"Connecting processing bin for stream {el}")
+
+            # Create elements
+            processing_bin = self.create_processing_bin()
+
+            # Add to the pipeline
+            self.pipeline.add(processing_bin)
+
+            # Connect the demuxer to the processing bin
+            src = demuxer.get_request_pad(f"src_{i}")
+            if src is None:
+                logger.error(
+                    f"Failed to request 'src_{i}' pad from the demuxer for stream {el}"
+                )
+                sys.exit(-1)
+
+            sink = processing_bin.get_static_pad("sink")
+            if sink is None:
+                logger.error(
+                    f"Failed to get 'sink' pad from the processing bin for stream {el}"
+                )
+                sys.exit(-1)
+
+            if src.link(sink) != Gst.PadLinkReturn.OK:
+                logger.error(
+                    f"Failed to link the demuxer to the processing bin for stream {el}"
+                )
+                sys.exit(-1)
+            else:
+                logger.info(
+                    f"Linked demuxer 'src_{i}' pad to the processing bin 'sink' pad for stream {el}"
+                )
+
+        # Get hold of the bus and add a watcher
+        bus = self.pipeline.get_bus()
+        bus.add_signal_watch()
+        bus.connect("message", self.on_message)
+
+    def create_processing_bin(self) -> Gst.Bin:
+        """Creates a processor bin
+
+        Returns
+        -------
+        Gst.Bin
+            Created processor bin.
+ """ + + # Create a bin + bin = Gst.Bin.new(f"video_processing_bin_{self.bin_cntr}") + # Create all the elements - self.source = gsthelpers.create_element("filesrc", "source") - self.demuxer = gsthelpers.create_element("qtdemux", "demuxer") - self.video_queue = gsthelpers.create_element("queue", "video-queue") - self.h264_parser = gsthelpers.create_element("h264parse", "h264-parser") - self.h264_decoder = gsthelpers.create_element("nvv4l2decoder", "h264-decoder") - self.stream_muxer = gsthelpers.create_element("nvstreammux", "stream-muxer") - self.primary_inference = gsthelpers.create_element( - "nvinferserver", "primary-inference" + primary_inference = gsthelpers.create_element( + "nvinferserver", f"primary-inference-{self.bin_cntr}" ) - self.tracker = gsthelpers.create_element("nvtracker", "tracker") - self.secondary1_inference = gsthelpers.create_element( - "nvinferserver", "secondary1-inference" + tracker = gsthelpers.create_element("nvtracker", f"tracker-{self.bin_cntr}") + secondary1_inference = gsthelpers.create_element( + "nvinferserver", f"secondary1-inference-{self.bin_cntr}" ) - self.secondary2_inference = gsthelpers.create_element( - "nvinferserver", "secondary2-inference" + secondary2_inference = gsthelpers.create_element( + "nvinferserver", f"secondary2-inference-{self.bin_cntr}" ) - self.secondary3_inference = gsthelpers.create_element( - "nvinferserver", "secondary3-inference" + secondary3_inference = gsthelpers.create_element( + "nvinferserver", f"secondary3-inference-{self.bin_cntr}" ) - self.video_converter = gsthelpers.create_element( - "nvvideoconvert", "video-converter" + video_converter = gsthelpers.create_element( + "nvvideoconvert", f"video-converter-{self.bin_cntr}" ) - self.osd = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw") - self.tee = gsthelpers.create_element("tee", "tee") - # Video sink branch - self.videosink_queue = gsthelpers.create_element("queue", "videosink-queue") - self.video_sink = gsthelpers.create_element("nveglglessink", "nvvideo-renderer") - - # Add elements to the pipeline - self.pipeline.add(self.source) - self.pipeline.add(self.demuxer) - self.pipeline.add(self.video_queue) - self.pipeline.add(self.h264_parser) - self.pipeline.add(self.h264_decoder) - self.pipeline.add(self.stream_muxer) - self.pipeline.add(self.primary_inference) - self.pipeline.add(self.tracker) - self.pipeline.add(self.secondary1_inference) - self.pipeline.add(self.secondary2_inference) - self.pipeline.add(self.secondary3_inference) - self.pipeline.add(self.video_converter) - self.pipeline.add(self.osd) - self.pipeline.add(self.tee) - # Video sink branch - self.pipeline.add(self.videosink_queue) - self.pipeline.add(self.video_sink) - - # Set properties for the streammux - self.stream_muxer.set_property("width", 1920) - self.stream_muxer.set_property("height", 1080) - self.stream_muxer.set_property("batch-size", 1) - self.stream_muxer.set_property("batched-push-timeout", 4000000) - - # Set properties for sinks - self.video_sink.set_property("async", False) + osd = gsthelpers.create_element("nvdsosd", f"draw-overlays-{self.bin_cntr}") + videosink_queue = gsthelpers.create_element( + "queue", f"videosink-queue-{self.bin_cntr}" + ) + video_sink = gsthelpers.create_element( + "nveglglessink", f"nvvideo-renderer-{self.bin_cntr}" + ) + queue1 = gsthelpers.create_element("queue", f"queue1-{self.bin_cntr}") + queue2 = gsthelpers.create_element("queue", f"queue2-{self.bin_cntr}") + queue3 = gsthelpers.create_element("queue", f"queue3-{self.bin_cntr}") + queue4 = 
gsthelpers.create_element("queue", f"queue4-{self.bin_cntr}") + + # Add elements to the bin + bin.add(primary_inference) + bin.add(tracker) + bin.add(secondary1_inference) + bin.add(secondary2_inference) + bin.add(secondary3_inference) + bin.add(video_converter) + bin.add(osd) + bin.add(videosink_queue) + bin.add(video_sink) + bin.add(queue1) + bin.add(queue2) + bin.add(queue3) + bin.add(queue4) + + if platform.machine() == "aarch64": + # Add egl-transform for Jetson + video_sink_transform = gsthelpers.create_element( + "nvegltransform", f"video-sink-transform-{self.bin_cntr}" + ) + bin.add(video_sink_transform) + + # Link inference, tracker and visualization + gsthelpers.link_elements( + [ + queue1, + primary_inference, + queue2, + tracker, + queue3, + secondary1_inference, + secondary2_inference, + secondary3_inference, + queue4, + video_converter, + osd, + video_sink_transform, + video_sink, + ] + ) + else: + # Link inference, tracker and visualization + gsthelpers.link_elements( + [ + queue1, + primary_inference, + queue2, + tracker, + queue3, + secondary1_inference, + secondary2_inference, + secondary3_inference, + queue4, + video_converter, + osd, + video_sink, + ] + ) # Set properties for the inference engines - self.primary_inference.set_property( + # Since we're reusing the same configuration, we need to tweak the unique-id and infer-on-gie-id + # in order to avoid clashes + primary_inference.set_property( "config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_plan_engine_primary.txt", ) - self.secondary1_inference.set_property( + # primary_inference.set_property("unique-id", 1 + self.bin_cntr * 10) + # logger.info( + # f"Primary detector unique ID: {primary_inference.get_property('unique-id')}" + # ) + + secondary1_inference.set_property( "config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carcolor.txt", ) - self.secondary2_inference.set_property( + secondary1_inference.set_property("unique-id", 2 + self.bin_cntr * 10) + # secondary1_inference.set_property("infer-on-gie-id", 1 + self.bin_cntr * 10) + # logger.info( + # f"Secondary 1 detector unique ID: {secondary1_inference.get_property('unique-id')}" + # ) + + secondary2_inference.set_property( "config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_carmake.txt", ) - self.secondary3_inference.set_property( + secondary2_inference.set_property("unique-id", 3 + self.bin_cntr * 10) + # secondary2_inference.set_property("infer-on-gie-id", 1 + self.bin_cntr * 10) + # logger.info( + # f"Secondary 2 detector unique ID: {secondary2_inference.get_property('unique-id')}" + # ) + + secondary3_inference.set_property( "config-file-path", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app-triton/config_infer_secondary_plan_engine_vehicletypes.txt", ) + # secondary3_inference.set_property("unique-id", 4 + self.bin_cntr * 10) + # secondary3_inference.set_property("infer-on-gie-id", 1 + self.bin_cntr * 10) + # logger.info( + # f"Secondary 3 detector unique ID: {secondary3_inference.get_property('unique-id')}" + # ) - # Set properties for the tracker + # Configure tracker tracker_config = configparser.ConfigParser() tracker_config.read("dstest2_tracker_config.txt") - tracker_config.sections() - for key in tracker_config["tracker"]: - if key == "tracker-width": - tracker_width = tracker_config.getint("tracker", key) - 
self.tracker.set_property("tracker-width", tracker_width) - if key == "tracker-height": - tracker_height = tracker_config.getint("tracker", key) - self.tracker.set_property("tracker-height", tracker_height) - if key == "gpu-id": - tracker_gpu_id = tracker_config.getint("tracker", key) - self.tracker.set_property("gpu_id", tracker_gpu_id) - if key == "ll-lib-file": - tracker_ll_lib_file = tracker_config.get("tracker", key) - self.tracker.set_property("ll-lib-file", tracker_ll_lib_file) - if key == "ll-config-file": - tracker_ll_config_file = tracker_config.get("tracker", key) - self.tracker.set_property("ll-config-file", tracker_ll_config_file) - if key == "enable-batch-process": - tracker_enable_batch_process = tracker_config.getint("tracker", key) - self.tracker.set_property( - "enable_batch_process", tracker_enable_batch_process - ) - if key == "enable-past-frame": - tracker_enable_past_frame = tracker_config.getint("tracker", key) - self.tracker.set_property( - "enable_past_frame", tracker_enable_past_frame - ) - - # --- LINK IMAGE PROCESSING --- - # Link video input and inference as follows: - # - # filesrc -> demux -> queue -> h264parser -> h264decoder -> streammux -> - # primary_inference1 -> tracker -> secondary_inference1 -> secondary_inference2 -> secondary_inference3 -> - # videoconverter -> osd (bounding boxes) -> tee - # - # After the tee element we have two output branches that are described later. - - # Link source to demuxer - gsthelpers.link_elements([self.source, self.demuxer]) - - # Connect demux to the pad-added signal, used to link demuxer to queue dynamically - demuxer_pad_added = gsthelpers.PadAddedLinkFunctor() - demuxer_pad_added.register("video_", self.video_queue, "sink") - - assert self.demuxer.connect("pad-added", demuxer_pad_added) is not None - - # Link video pipeline - gsthelpers.link_elements( - [self.video_queue, self.h264_parser, self.h264_decoder] - ) - - # Link decoder to streammux - source = self.h264_decoder.get_static_pad("src") - assert source is not None - sink = self.stream_muxer.get_request_pad("sink_0") - assert sink is not None - assert source.link(sink) == Gst.PadLinkReturn.OK - - # Link inference, tracker and visualization - gsthelpers.link_elements( - [ - self.stream_muxer, - self.primary_inference, - self.tracker, - self.secondary1_inference, - self.secondary2_inference, - self.secondary3_inference, - self.video_converter, - self.osd, - self.tee, - ] - ) - - # --- LINK OUTPUT BRANCHE --- - # We have videosink output as follows: - # - # osd -> tee -> queue -> videosink - # + value = tracker_config["tracker"][key] + if value.isdigit(): + value = int(value) + tracker.set_property(key, value) + + # Create ghost pads for external linking + sink_pad = Gst.GhostPad.new("sink", queue1.get_static_pad("sink")) + if not sink_pad: + logger.error("bin failed to create a GhostPad for sink") + sys.exit(-1) + bin.add_pad(sink_pad) - # --- Video-sink output branch --- - src = self.tee.get_request_pad("src_0") - assert src is not None - sink = self.videosink_queue.get_static_pad("sink") - assert sink is not None - assert src.link(sink) == Gst.PadLinkReturn.OK - - # Link video_queue to video_sink - gsthelpers.link_elements([self.videosink_queue, self.video_sink]) - - # --- Meta-data output --- # Add a probe to the sink pad of the osd-element in order to draw/print meta-data to the canvas - osdsinkpad = self.osd.get_static_pad("sink") + # using the function osd_sink_pad_buffer_probe + osdsinkpad = osd.get_static_pad("sink") assert osdsinkpad is not None 
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0) - def play(self, input_file: str): - """ - - :param input_file: path to the h264 encoded input file - :return: - """ - - print(f"PLAY(input_file={input_file})") + self.bin_cntr += 1 - # Check if the file exists - if not os.path.exists(input_file): - raise RuntimeError(f"Input file '{input_file}' does not exist") + return bin - # Set source location property to the file location - self.source.set_property("location", input_file) - - # Create a bus and add signal watcher - bus = self.pipeline.get_bus() - bus.add_signal_watch() - bus.connect("message", self.on_message) - - print("Setting pipeline state to PLAYING...", end="") + def start(self) -> None: + logging.info("MultiPlayer starting") if self.pipeline.set_state(Gst.State.PLAYING) == Gst.StateChangeReturn.FAILURE: - print("failed") + logging.error("MultiPlayer, failed to start pipeline") + self.stop() + return else: - print("done") + logging.info("MultiPlayer, started") - # Start loop + # Run the main loop to keep all pipelines running self.loop.run() - def stop(self): - print("STOP()") - print("Setting pipeline state to NULL...", end="") - self.pipeline.set_state(Gst.State.NULL) - print("done") - self.loop.quit() - - def on_message(self, bus, message): - """ - Message handler function. - - :param bus: bus - :param message: message - :return: nothing - """ + def on_message(self, bus: Gst.Bus, message: Gst.Message) -> None: message_type = message.type + + # End of stream if message_type == Gst.MessageType.EOS: - print("EOS message type received") + logging.info("MultiPlayer, EOS received") self.stop() + # Error elif message_type == Gst.MessageType.ERROR: - err, dbg = message.parse_error() - print(f"Error from {message.src.get_name()}: {err.message}") + err, debug = message.parse_error() + logging.error(f"MultiPlayer error {err.message}, debug {debug}") self.stop() - def stop_handler(self, sig, frame): - """ - Even handler for stopping the pipeline. 
+ # # State changed + # elif message_type == Gst.MessageType.STATE_CHANGED: + # old_state, new_state, pending = message.parse_state_changed() - :param sig: signal - :param frame: stack frame - :return: - """ - print("Signal SIGINT received") - self.stop() - - -class MultiPipelinePlayer: - def __init__(self, num_pipelines, input_file): - Gst.init(None) - self.loop = GLib.MainLoop() - signal.signal(signal.SIGINT, self.stop_handler) - self.pipelines = [] - self.num_pipelines = num_pipelines - - for i in range(num_pipelines): - # Create a Player instance for each pipeline - player = Player() - - # Add player and its corresponding output file to the list - self.pipelines.append(player) - - def start(self, input_file): - # Configure each pipeline with the same input file but different output files - for i, player in enumerate(self.pipelines): - print(f"Starting pipeline {i+1}") - player.source.set_property("location", input_file) - - # Create a bus for each pipeline - bus = player.pipeline.get_bus() - bus.add_signal_watch() - bus.connect("message", self.on_message, player) + # src = message.src + # if isinstance(src, Gst.Pipeline): + # element_name = "Pipeline" + # else: + # element_name = src.get_name() - # Set the pipeline to PLAYING state - player.pipeline.set_state(Gst.State.PLAYING) - - # Run the main loop to keep all pipelines running - self.loop.run() - - def on_message(self, bus, message, player): - msg_type = message.type - if msg_type == Gst.MessageType.EOS: - print("End of stream reached for a pipeline.") - player.stop() - self.check_all_pipelines_stopped() - elif msg_type == Gst.MessageType.ERROR: - err, debug = message.parse_error() - print(f"Error: {err.message}") - player.stop() - self.check_all_pipelines_stopped() - - def check_all_pipelines_stopped(self): - if all( - player.pipeline.get_state(Gst.CLOCK_TIME_NONE)[1] == Gst.State.NULL - for player in self.pipelines - ): - print("All pipelines have stopped. 
Exiting...") - self.loop.quit() + # logging.info( + # f"MultiPlayer, {element_name} state changed: {old_state.value_nick} -> {new_state.value_nick}" + # ) def stop(self): - print("Stopping all pipelines...") - for player in self.pipelines: - player.stop() + logging.info("MultiPlayer stopping") + + if self.pipeline: + self.pipeline.set_state(Gst.State.NULL) self.loop.quit() def stop_handler(self, sig, frame): self.stop() -if __name__ == "__main__": +def main(): + logging.basicConfig(level=logging.INFO) + argParser = argparse.ArgumentParser() - argParser.add_argument("-i", "--input_file", help="input file path", default="") - argParser.add_argument( - "-n", "--num_pipelines", help="Number of pipelines to run", type=int, default=1 - ) + argParser.add_argument("-u", "--uri", help="Input uri", default="") args = argParser.parse_args() - multi_player = MultiPipelinePlayer(args.num_pipelines, args.input_file) + multi_player = MultiPlayer(args.uri) try: - multi_player.start(args.input_file) + multi_player.start() except Exception as e: - print(e) + logging.error(f"Failed to start the pipeline: {e}") multi_player.stop() sys.exit(-1) sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v2.py b/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v2.py index 5ad0988..6c4d8da 100644 --- a/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v2.py +++ b/deepstream-examples/deepstream-triton-tracking/gst-triton-parallel-tracking-v2.py @@ -13,6 +13,8 @@ >> /opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4 """ +import logging +import platform import argparse import configparser import sys @@ -20,19 +22,29 @@ from helpers import gsthelpers import gi import math +from typing import List + +logger = logging.getLogger(__name__) gi.require_version("Gst", "1.0") from gi.repository import Gst, GLib # noqa: E402 -def set_tiler_layout(tiler, num_streams, tile_width=1920, tile_height=1080): - """ - Automatically calculate rows and columns for the tiler and set its properties. - - :param tiler: The nvmultistreamtiler element. - :param num_streams: Number of input streams. - :param tile_width: Width of the tiled output. - :param tile_height: Height of the tiled output. +def set_tiler_layout( + tiler: Gst.Element, num_streams: int, tile_width=1920, tile_height=1080 +): + """Automatically calculate rows and columns for the tiler and set its properties. 
+ + Parameters + ---------- + tiler : Gst.Element + The nvmultistreamtiler element + num_streams : int + Number of input streams + tile_width : int, optional + Width of the tiled output, by default 1920 + tile_height : int, optional + Height of the tiled output, by default 1080 """ # Calculate number of rows and columns rows = math.ceil(math.sqrt(num_streams)) @@ -44,26 +56,26 @@ def set_tiler_layout(tiler, num_streams, tile_width=1920, tile_height=1080): tiler.set_property("width", tile_width) tiler.set_property("height", tile_height) - print( + logger.info( f"Tiler layout: {rows} rows x {columns} columns, Output size: {tile_width}x{tile_height}" ) class Player: - def __init__(self, input_files): + def __init__(self, input_files: List[str]): Gst.init(None) self.loop = GLib.MainLoop() signal.signal(signal.SIGINT, self.stop_handler) # Register signal handlers - signal.signal(signal.SIGINT, self.stop_handler) # Handle Ctrl+C - signal.signal(signal.SIGTERM, self.stop_handler) # Handle termination signals + signal.signal(signal.SIGINT, self.stop_handler) + signal.signal(signal.SIGTERM, self.stop_handler) # Create pipeline self.pipeline = Gst.Pipeline.new("multi-stream-pipeline") - # Elements + # Create all the elements self.stream_muxer = gsthelpers.create_element("nvstreammux", "stream-muxer") self.primary_inference = gsthelpers.create_element( "nvinferserver", "primary-inference" @@ -84,6 +96,12 @@ def __init__(self, input_files): ) self.osd = gsthelpers.create_element("nvdsosd", "nvidia-bounding-box-draw") self.video_sink = gsthelpers.create_element("nveglglessink", "video-sink") + self.queue1 = gsthelpers.create_element("queue", "queue1") + self.queue2 = gsthelpers.create_element("queue", "queue2") + self.queue3 = gsthelpers.create_element("queue", "queue3") + self.queue4 = gsthelpers.create_element("queue", "queue4") + self.queue5 = gsthelpers.create_element("queue", "queue5") + self.queue6 = gsthelpers.create_element("queue", "queue6") # Configure streammux self.stream_muxer.set_property("width", 1920) @@ -95,6 +113,7 @@ def __init__(self, input_files): # Configure video sink self.video_sink.set_property("sync", True) + # self.video_sink.set_property("max-lateness", -1) # Configure inference engines self.primary_inference.set_property( @@ -129,30 +148,72 @@ def __init__(self, input_files): # Add elements to pipeline self.pipeline.add(self.stream_muxer) self.pipeline.add(self.primary_inference) + self.pipeline.add(self.tracker) self.pipeline.add(self.secondary1_inference) self.pipeline.add(self.secondary2_inference) self.pipeline.add(self.secondary3_inference) - self.pipeline.add(self.tracker) self.pipeline.add(self.tiler) self.pipeline.add(self.video_converter) self.pipeline.add(self.osd) self.pipeline.add(self.video_sink) - - # Link elements - gsthelpers.link_elements( - [ - self.stream_muxer, - self.primary_inference, - self.secondary1_inference, - self.secondary2_inference, - self.secondary3_inference, - self.tracker, - self.tiler, - self.video_converter, - self.osd, - self.video_sink, - ] - ) + self.pipeline.add(self.queue1) + self.pipeline.add(self.queue2) + self.pipeline.add(self.queue3) + self.pipeline.add(self.queue4) + self.pipeline.add(self.queue5) + self.pipeline.add(self.queue6) + + # If arm (Jetson) add and link nvegltransform + if platform.machine() == "aarch64": + self.video_sink_transform = gsthelpers.create_element( + "nvegltransform", "video_sink_transform" + ) + self.pipeline.add(self.video_sink_transform) + # Link elements + gsthelpers.link_elements( + [ + 
self.stream_muxer, + self.queue1, + self.primary_inference, + self.queue2, + self.tracker, + self.queue3, + self.secondary1_inference, + self.secondary2_inference, + self.secondary3_inference, + self.queue4, + self.tiler, + self.video_converter, + self.queue5, + self.osd, + self.queue6, + self.video_sink_transform, + self.video_sink, + ] + ) + # In other platforms nvegltransform is not required + else: + # Link elements + gsthelpers.link_elements( + [ + self.stream_muxer, + self.queue1, + self.primary_inference, + self.queue2, + self.tracker, + self.queue3, + self.secondary1_inference, + self.secondary2_inference, + self.secondary3_inference, + self.queue4, + self.tiler, + self.video_converter, + self.queue5, + self.osd, + self.queue6, + self.video_sink, + ] + ) # Add sources dynamically for i, input_file in enumerate(input_files): @@ -195,36 +256,40 @@ def play(self): bus.add_signal_watch() bus.connect("message", self.on_message) - print("Setting pipeline state to PLAYING...") + logging.info("Setting pipeline state to PLAYING") if self.pipeline.set_state(Gst.State.PLAYING) == Gst.StateChangeReturn.FAILURE: - print("Failed to set pipeline to PLAYING") + logging.error("Failed to set pipeline to PLAYING") + sys.exit(-1) else: - print("Pipeline is now PLAYING") + logging.info("Pipeline is now PLAYING") self.loop.run() - def on_message(self, bus, message): + def on_message(self, bus: Gst.Bus, message: Gst.Message) -> None: msg_type = message.type if msg_type == Gst.MessageType.EOS: - print("All streams have sent EOS. Stopping pipeline...") + logging.info("All streams have sent EOS. Stopping pipeline...") self.stop() + elif msg_type == Gst.MessageType.ERROR: err, debug = message.parse_error() - print(f"Error from {message.src.get_name()}: {err.message}") + logging.error(f"Error from {message.src.get_name()}: {err.message}") self.stop() def stop(self): - print("Stopping pipeline...") + logging.info("Stopping pipeline.") if self.pipeline: self.pipeline.set_state(Gst.State.NULL) # Transition to NULL state self.loop.quit() # Quit the GLib main loop - print("Pipeline stopped.") + logging.info("Pipeline stopped.") def stop_handler(self, sig, frame): - print("Signal received. Stopping pipeline...") + logging.info("Signal received. Stopping pipeline...") self.stop() -if __name__ == "__main__": +def main(): + logging.basicConfig(level=logging.INFO) + argParser = argparse.ArgumentParser() argParser.add_argument( "-i", "--input_files", nargs="+", help="Input video files", required=True @@ -235,8 +300,12 @@ def stop_handler(self, sig, frame): try: player.play() except Exception as e: - print(f"Error: {e}") + logging.error(f"Failed to start the pipeline: {e}") player.stop() sys.exit(-1) sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/docker/README.md b/docker/README.md index 0d87e19..adc289f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,7 +2,7 @@ This directory contains docker files used for generating docker images where the examples can be run. 
-* [Dockerfile-deepstream-6.3-triton-devel]Dockerfile-deepstream-6.3-triton-devel)
+* [Dockerfile-deepstream-6.3-triton-devel](Dockerfile-deepstream-6.3-triton-devel)
   * Docker container with DeepStream 6.3 plus samples, Triton, and DeepStream Python bindings
   * Based on nvcr.io/nvidia/deepstream:6.3-triton-multiarch
   * glmark2 for testing OpenGL inside the container
@@ -71,7 +71,6 @@ You should see output following (or similar) output:
 |  ID   ID                                                   Usage      |
 |=============================================================================|
 +-----------------------------------------------------------------------------+
-
 ```

 ## 1.2 Create the Docker Image

 After this you can create the docker image used in the examples.

 ```bash
-docker build -t nvidia-deepstream-samples -f ./Dockerfile-deepstream .
+docker build -t deepstream-6.3 -f ./Dockerfile-deepstream-6.3-triton-devel .
+```
+
+## 1.3 Test the Docker Image
+
+Some of the examples use GStreamer plugin `nveglglessink` for showing the results in real time. `nveglglessink`
+depends on OpenGL, so making sure that OpenGL works inside the container is essential. Make sure that the `DISPLAY`
+environment variable has been set:
+
+```bash
+env | grep DISPLAY
+```
+If it is not set, then you need to set it:
+
+```bash
+export DISPLAY=:<display>
+```
+
+Replace `<display>` with the actual display number, which is typically `0` or `1`.
+
+Then start the container:
+
+```bash
+xhost +
+docker run -i -t --rm \
+    -v /tmp/.X11-unix:/tmp/.X11-unix \
+    -v $(pwd):/home/gstreamer-examples \
+    -e DISPLAY=$DISPLAY \
+    -e XAUTHORITY=$XAUTHORITY \
+    -e NVIDIA_DRIVER_CAPABILITIES=all \
+    --gpus all deepstream-6.3 bash
 ```

+Then execute the following inside the container:
+
+```bash
+glxinfo | grep OpenGL
+```
+
+You should see something similar to:
+
+```bash
+OpenGL vendor string: NVIDIA Corporation
+OpenGL renderer string: NVIDIA GeForce GTX 1070/PCIe/SSE2
+OpenGL core profile version string: 4.6.0 NVIDIA 525.60.13
+OpenGL core profile shading language version string: 4.60 NVIDIA
+OpenGL core profile context flags: (none)
+OpenGL core profile profile mask: core profile
+OpenGL core profile extensions:
+OpenGL version string: 4.6.0 NVIDIA 525.60.13
+OpenGL shading language version string: 4.60 NVIDIA
+OpenGL context flags: (none)
+OpenGL profile mask: (none)
+OpenGL extensions:
+OpenGL ES profile version string: OpenGL ES 3.2 NVIDIA 525.60.13
+OpenGL ES profile shading language version string: OpenGL ES GLSL ES 3.20
+OpenGL ES profile extensions:
+```
+
+If the `OpenGL vendor string` is `NVIDIA Corporation`, execute an OpenGL test application inside the container:
+
+```bash
+glmark2
+```
+
+A window should pop-up, displaying a horse.
+
diff --git a/gst-examples/gst-rtsp-server.py b/gst-examples/gst-rtsp-server.py
deleted file mode 100644
index 2144fb7..0000000
--- a/gst-examples/gst-rtsp-server.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""
-RTSP Server Script
-
-This script creates an RTSP server that streams a media file specified via command-line arguments.
-The stream can be played back using an RTSP-compatible player like VLC or ffplay.
-
-Example usage to start the server:
-    python3 rtsp_server.py --file /path/to/media/file.mp4
-
-To playback the stream, use one of the following:
-    - ffplay rtsp://localhost:8554/test
-    - gst-launch-1.0 rtspsrc location=rtsp://127.0.0.1:8554/camera1 protocols=tcp latency=500 !
-      rtph264depay ! h264parse ! nvv4l2decoder ! queue ! nvvideoconvert ! queue !
- mux.sink_1 nvstreammux name=mux width=1920 height=1080 batch-size=1 live-source=1 ! queue ! - nvvideoconvert ! queue ! nvdsosd ! queue ! nveglglessink - -The server will stream the media file over the RTSP protocol, converting raw video to H.264 format. -""" - -import gi -import os -import argparse -from gi.repository import Gst, GstRtspServer, GLib - -gi.require_version("Gst", "1.0") -gi.require_version("GstRtspServer", "1.0") - - -class RTSPServer: - """RTSP Server to stream media files over RTSP protocol. - - Parameters - ---------- - file_path : str - The path to the media file to be streamed. - - Raises - ------ - FileNotFoundError - If the specified file path does not exist. - """ - - def __init__(self, file_path: str) -> None: - if not os.path.isfile(file_path): - raise FileNotFoundError(f"File not found: {file_path}") - - Gst.init(None) - - server = GstRtspServer.RTSPServer() - server.set_address("0.0.0.0") - server.props.service = "8554" - - factory = GstRtspServer.RTSPMediaFactory() - launch_str = f""" - filesrc location="{file_path}" ! - decodebin name=decodebin ! - queue ! - videoconvert ! - x264enc ! - h264parse ! - rtph264pay name=pay0 pt=96 config-interval=1 - """ - factory.set_launch(launch_str) - factory.set_shared(True) - factory.set_eos_shutdown(False) # Ensure the pipeline is created immediately - - # factory.connect("media-configure", self.on_media_configure) - - server.get_mount_points().add_factory("/test", factory) - server.attach(None) - print("RTSP server is running on rtsp://0.0.0.0:8554/test") - - def on_media_configure( - self, factory: GstRtspServer.RTSPMediaFactory, media: GstRtspServer.RTSPMedia - ) -> None: - """Configure the media pipeline when it is created. - - Parameters - ---------- - factory : GstRtspServer.RTSPMediaFactory - The media factory that triggered this event. - media : GstRtspServer.RTSPMedia - The media object that contains the pipeline. - """ - print("Media is being configured.") - pipeline = media.get_element() - bus = pipeline.get_bus() - - bus.add_signal_watch() - bus.connect("message::state-changed", self.on_state_changed) - bus.connect("message::error", self.on_error) - bus.connect("message::eos", self.on_eos) - - decodebin = pipeline.get_by_name("decodebin") - if decodebin is not None: - decodebin.connect("pad-added", self.on_pad_added) - - def on_pad_added(self, element: Gst.Element, pad: Gst.Pad) -> None: - """Handle the addition of a new pad from decodebin. - - Parameters - ---------- - element : Gst.Element - The element that generated the pad. - pad : Gst.Pad - The pad that was added. - """ - print(f"New pad '{pad.get_name()}' added.") - caps = pad.query_caps(None) - structure_name = caps.get_structure(0).get_name() - print(f"Pad Caps: {structure_name}") - - if "video" in structure_name: - print("Video pad detected, linking elements.") - sink = element.get_static_pad("sink") - if pad.can_link(sink): - print(f"Linking pad {pad.get_name()} to sink pad.") - pad.link(sink) - else: - print( - f"Skipping non-video pad: {pad.get_name()} with caps: {structure_name}" - ) - - def on_state_changed(self, bus: Gst.Bus, message: Gst.Message) -> None: - """Handle state changes of the GStreamer pipeline. - - Parameters - ---------- - bus : Gst.Bus - The bus associated with the pipeline. - message : Gst.Message - The message describing the state change. 
-        """
-        if message.src.get_name() == "pipeline0":
-            old, new, pending = message.parse_state_changed()
-            print(f"Pipeline state changed: {old.value_nick} -> {new.value_nick}")
-            if new == Gst.State.PAUSED or new == Gst.State.PLAYING:
-                print("Pipeline is ready, attaching bus watch.")
-
-    def on_error(self, bus: Gst.Bus, message: Gst.Message) -> None:
-        """Handle error messages from the GStreamer bus.
-
-        Parameters
-        ----------
-        bus : Gst.Bus
-            The bus associated with the pipeline.
-        message : Gst.Message
-            The message describing the error.
-        """
-        err, debug = message.parse_error()
-        print(f"ERROR: {err}, Debug info: {debug}")
-
-    def on_eos(self, bus: Gst.Bus, message: Gst.Message) -> None:
-        """Handle end-of-stream (EOS) messages from the GStreamer bus.
-
-        Parameters
-        ----------
-        bus : Gst.Bus
-            The bus associated with the pipeline.
-        message : Gst.Message
-            The message indicating that the end of the stream has been reached.
-        """
-        print("End of stream reached!")
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="RTSP Server to stream a media file over RTSP protocol."
-    )
-    parser.add_argument(
-        "--file", type=str, required=True, help="Path to the media file to be streamed."
-    )
-    args = parser.parse_args()
-
-    file_path: str = args.file
-    try:
-        server = RTSPServer(file_path)
-        loop = GLib.MainLoop()  # Start the GLib main loop
-        loop.run()
-    except FileNotFoundError as e:
-        print(f"ERROR: {e}")
diff --git a/gst-examples/rtsp-server/Dockerfile b/gst-examples/rtsp-server/Dockerfile
new file mode 100644
index 0000000..c331aa8
--- /dev/null
+++ b/gst-examples/rtsp-server/Dockerfile
@@ -0,0 +1,22 @@
+FROM ubuntu:20.04
+
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+    gstreamer1.0-tools \
+    gstreamer1.0-plugins-base \
+    gstreamer1.0-plugins-good \
+    gstreamer1.0-plugins-bad \
+    gstreamer1.0-plugins-ugly \
+    gstreamer1.0-libav \
+    libgirepository1.0-dev \
+    python3-gi python3-pip
+
+RUN apt-get install -y \
+    gstreamer1.0-rtsp \
+    gir1.2-gst-rtsp-server-1.0 \
+    gir1.2-gstreamer-1.0 \
+    ffmpeg \
+    net-tools
+
+WORKDIR /home
+COPY ./rtsp-server.py /home
diff --git a/gst-examples/rtsp-server/README.md b/gst-examples/rtsp-server/README.md
new file mode 100644
index 0000000..ccf9ae5
--- /dev/null
+++ b/gst-examples/rtsp-server/README.md
@@ -0,0 +1,76 @@
+# 1 RTSP Server
+
+This directory implements an RTSP server that can be used for playing back video files as RTSP streams. Contents are as follows:
+
+* [Dockerfile](./Dockerfile)
+  * Docker container used for running the RTSP server
+* [docker-compose.yml](./docker-compose.yml)
+  * Docker compose file that can be used for starting the service. By default,
+    video files from `$HOME/Videos` are used. You need to copy the files
+    to that folder and then modify the docker compose file so that the correct files
+    are played back
+* [rtsp-server.py](./rtsp-server.py)
+  * The file that starts the RTSP server
+
+# 2 Usage
+
+## 2.1 Starting the RTSP Server
+
+Copy the video files that you want to play back to the `$HOME/Videos` folder and modify the file `./docker-compose.yml` so
+that the correct files are being played back. Each file will be mapped to an RTSP stream, starting from `camera1`.
+For example, if we have the following in the `./docker-compose.yml`:
+
+```yaml
+command: python3 /home/rtsp-server.py --files file:///Videos/video1.mp4 file:///Videos/video2.mp4
+```
+
+The files will be mapped as follows:
+
+* `file:///Videos/video1.mp4` --> `rtsp://localhost:8554/camera1`
+* `file:///Videos/video2.mp4` --> `rtsp://localhost:8554/camera2`
+
+Once the `./docker-compose.yml` has been modified, you can start the service with:
+
+```bash
+docker compose up --build
+```
+
+## 2.2 Playback Using ffplay
+
+You can play back the stream, for example, using `ffplay` as follows:
+
+```bash
+ffplay rtsp://localhost:8554/camera1
+```
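+
+Since the server produces a standard RTSP/H.264 stream, other RTSP-capable players should work as well; for example, assuming VLC is installed on the host:
+
+```bash
+vlc rtsp://localhost:8554/camera1
+```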
+
+## 2.3 Playback Using GStreamer
+
+You can display the RTSP stream using only GStreamer plug-ins by running the following from the host machine:
+
+```bash
+gst-launch-1.0 rtspsrc location=rtsp://localhost:8554/camera1 protocols=tcp latency=200 ! \
+rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! timeoverlay ! autovideosink
+```
+
+## 2.4 Playback Using DeepStream on x86/x64
+
+You can display the RTSP stream using GStreamer and DeepStream by running the following from a non-Jetson device:
+
+```bash
+gst-launch-1.0 rtspsrc location=rtsp://localhost:8554/camera1 protocols=tcp latency=500 ! \
+rtph264depay ! h264parse ! nvv4l2decoder ! queue ! nvvideoconvert ! queue ! \
+mux.sink_1 nvstreammux name=mux width=1920 height=1080 batch-size=1 live-source=1 ! \
+queue ! nvvideoconvert ! queue ! nvdsosd ! queue ! nveglglessink
+```
+
+## 2.5 Playback Using DeepStream on Jetson
+
+You can display the RTSP stream using GStreamer and DeepStream by running the following from a Jetson device:
+
+```bash
+gst-launch-1.0 rtspsrc location=rtsp://localhost:8554/camera1 protocols=tcp latency=500 ! \
+rtph264depay ! h264parse ! nvv4l2decoder ! queue ! nvvideoconvert ! queue ! \
+mux.sink_1 nvstreammux name=mux width=1920 height=1080 batch-size=1 live-source=1 ! \
+queue ! nvvideoconvert ! queue ! nvdsosd ! queue ! nvegltransform ! nveglglessink
+```
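+
+## 2.6 Verifying a Stream Without a Display
+
+If you only want to verify that a stream is being served, without opening a playback window, `ffprobe` (installed alongside `ffmpeg`) can print the stream properties. This is a minimal check, assuming the default port `8554` and mount point `camera1` used by `rtsp-server.py`:
+
+```bash
+ffprobe rtsp://localhost:8554/camera1
+```
+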
+""" + +from urllib.parse import urlparse +import os +import argparse +import logging +from typing import List +import gi + +gi.require_version("Gst", "1.0") +gi.require_version("GstRtspServer", "1.0") +from gi.repository import Gst, GstRtspServer, GLib # noqa: E402, F401 + +logger = logging.getLogger(__name__) + + +class RTSPServer: + """RTSP Server to stream media files over RTSP protocol using urisrcbin.""" + + def __init__(self, uri_list: List[str]) -> None: + if not uri_list: + raise ValueError("No input files provided.") + + # If the uri_list contains files, check that these exist + for uri in uri_list: + uri_parsed = urlparse(uri) + if uri_parsed.scheme == "file": + if not os.path.exists(uri_parsed.path): + raise RuntimeError(f"File '{uri_parsed.path}' does not exist") + + Gst.init(None) + + self.server = GstRtspServer.RTSPServer() + self.server.set_address("0.0.0.0") + self.server.props.service = "8554" + + self._setup_streams(uri_list) + self.server.attach(None) + logging.info("RTSP server is running. Streams available at:") + for idx, file_path in enumerate(uri_list, 1): + logging.info(f"{file_path} -> rtsp://0.0.0.0:8554/camera{idx}") + + def _setup_streams(self, file_paths: List[str]) -> None: + """Set up RTSP streams for each file.""" + mount_points = self.server.get_mount_points() + + for idx, file_path in enumerate(file_paths, 1): + if not os.path.exists(file_path) and not file_path.startswith("file://"): + raise FileNotFoundError(f"File or URI not found: {file_path}") + + factory = GstRtspServer.RTSPMediaFactory() + factory.set_shared(True) + factory.set_eos_shutdown(False) + + # Use urisrcbin to handle URI inputs + launch_str = f""" + urisourcebin uri="{file_path}" ! + queue ! + decodebin name=decodebin ! + queue ! + videoconvert ! + x264enc ! + h264parse ! + rtph264pay name=pay0 pt=96 config-interval=1 + """ + factory.set_launch(launch_str) + + # Add factory to the RTSP server mount point + mount_points.add_factory(f"/camera{idx}", factory) + + def handle_message_callback(self, message, *user_data): + logger.info("Message") + + def on_message(bus: Gst.Bus, message: Gst.Message, pipeline: Gst.Pipeline) -> None: + """ + Handles GStreamer bus messages. + + This inner function listens for End of Stream (EOS) messages and restarts the pipeline + when EOS is reached. + + Parameters + ---------- + bus : Gst.Bus + The GStreamer bus to listen for messages on. + message : Gst.Message + The GStreamer message received on the bus. + pipeline : Gst.Pipeline + The pipeline to restart if an EOS message is received. + + Returns + ------- + None + """ + if message.type == Gst.MessageType.EOS: + logging.info( + f"EOS received for pipeline: {pipeline.get_name()}. Restarting pipeline..." + ) + pipeline.set_state(Gst.State.NULL) # Stop the pipeline + pipeline.set_state(Gst.State.PLAYING) # Restart the pipeline + elif message.type == Gst.MessageType.ERROR: + error, debug = message.parse_error() + logging.error( + f"Error received from {message.src.get_name()}: {error.message}" + ) + logging.error(f"Debug info: {debug}") + pipeline.set_state(Gst.State.NULL) # Stop on error + elif message.type == Gst.MessageType.STATE_CHANGED: + old, new, pending = message.parse_state_changed() + if message.src == pipeline: + logging.info( + f"Pipeline state changed from {old.value_name} to {new.value_name}." 
+
+    def run(self):
+        """Start the GLib main loop."""
+        try:
+            loop = GLib.MainLoop()
+            logger.info("Starting GLib Main Loop...")
+            loop.run()
+        except Exception as e:
+            logger.error(f"Error running GLib Main Loop: {e}")
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="RTSP Server to stream multiple media files over RTSP protocol."
+    )
+    parser.add_argument(
+        "--files",
+        type=str,
+        nargs="+",
+        required=True,
+        help="Paths or URIs of the media files to be streamed.",
+    )
+    args = parser.parse_args()
+
+    file_paths: List[str] = args.files
+    try:
+        logging.basicConfig(level=logging.INFO)
+        server = RTSPServer(file_paths)
+        server.run()
+    except (FileNotFoundError, ValueError, RuntimeError) as e:
+        # RuntimeError covers missing file:// URIs reported by RTSPServer.__init__
+        logger.error(f"ERROR: {e}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/helper-package/src/helpers/gsthelpers.py b/helper-package/src/helpers/gsthelpers.py
index 411309a..091e305 100644
--- a/helper-package/src/helpers/gsthelpers.py
+++ b/helper-package/src/helpers/gsthelpers.py
@@ -1,4 +1,7 @@
 import gi
+import logging
+
+logger = logging.getLogger(__name__)
 
 gi.require_version("Gst", "1.0")
 from gi.repository import Gst  # noqa: E402
@@ -17,7 +20,7 @@ def create_element(gst_elem: str, name: str):
     new_element = None
 
     # Try creating an element
-    print(f"Creating element: {gst_elem}")
+    logger.info(f"Creating element: {gst_elem}")
 
     new_element = Gst.ElementFactory.make(gst_elem, name)
     assert new_element is not None, f"Failed to create a Gst element '{gst_elem}'"
@@ -37,9 +40,8 @@ def link_elements(elements: list) -> None:
     assert len(elements) >= 2, f"At least 2 elements are needed, given {len(elements)}"
 
     for idx, x in enumerate(elements[:-1]):
-        print(
-            f"Linking element {elements[idx].get_name()} -> {elements[idx + 1].get_name()}...",
-            end="",
+        logger.info(
+            f"Linking element {elements[idx].get_name()} -> {elements[idx + 1].get_name()}"
         )
         assert isinstance(
             elements[idx], Gst.Element
         ), "elements[idx] must be of type Gst.Element"
         assert isinstance(
             elements[idx + 1], Gst.Element
         ), "elements[idx+1] must be of type Gst.Element"
         if elements[idx].link(elements[idx + 1]):
-            print("done")
+            logger.info("Linked")
         else:
-            print("failed")
+            logger.error("Failed to link")
             raise RuntimeError(
                 f"Failed to link: {elements[idx].get_name()} -> {elements[idx + 1].get_name()}"
             )
@@ -111,7 +113,7 @@ def __call__(self, element: Gst.Element, pad: Gst.Pad) -> None:
         pad_name = pad.get_name()
         element_name = element.get_name()
 
-        print(f"New pad '{pad_name}' created")
+        logger.info(f"New pad '{pad_name}' created")
 
         # Search if the new pad corresponds to any of the defined connections
         index = [i for i, v in enumerate(self.connections) if pad_name.startswith(v[0])]
@@ -126,16 +128,14 @@ def __call__(self, element: Gst.Element, pad: Gst.Pad) -> None:
             ), f"'{target_element.get_name()}' has no static pad called '{target_sink_name}'"
 
             if not sink_pad.is_linked():
-                print(
-                    f"Linking '{element_name}:{pad_name}' \
-                    -> '{target_element.get_name()}:{sink_pad.get_name()}'...",
-                    end="",
+                logger.info(
+                    f"Linking '{element_name}:{pad_name}' -> '{target_element.get_name()}:{sink_pad.get_name()}'"
                 )
                 ret = pad.link(sink_pad)
                 if ret == Gst.PadLinkReturn.OK:
-                    print("done")
+                    logger.info("Linked")
                 else:
-                    print("error")
+                    logger.error("Failed to link")
             elif len(index) > 1:
                 raise RuntimeError(