From 18d575cfef1787053b91c5d8fe3f025e5402961a Mon Sep 17 00:00:00 2001
From: Denis Medyantsev <44088010+denisvmedyantsev@users.noreply.github.com>
Date: Fri, 17 May 2024 13:20:05 -0300
Subject: [PATCH] Fix extra container (#757)

---
 docker/Dockerfile.deepstream                  | 29 +++++++++---
 docs/source/advanced_topics/0_extra_image.rst | 28 ++++++------
 tests/test_extra.py                           | 44 +++++++++++++++++++
 utils/build_torch2trt.sh                      |  4 +-
 4 files changed, 82 insertions(+), 23 deletions(-)
 create mode 100644 tests/test_extra.py

diff --git a/docker/Dockerfile.deepstream b/docker/Dockerfile.deepstream
index 0a148c4c4..33fcef8a8 100644
--- a/docker/Dockerfile.deepstream
+++ b/docker/Dockerfile.deepstream
@@ -266,12 +266,12 @@ RUN python -m pip install --no-cache-dir torch torchvision torchaudio
 ENV TORCH_HOME=/cache/models/torch_hub
 
 # torch2trt
-ARG TORCH2TRT_VERSION=0.4.0
+ARG TORCH2TRT_VERSION=0.5.0
 ARG TORCH2TRT_WHL="torch2trt-${TORCH2TRT_VERSION}-py3-none-any.whl"
 RUN wget -nv -P /usr/local/lib/ ${PACKAGES_URL}/libtorch2trt_plugins.so && \
     ldconfig && \
     wget -nv -O ${TORCH2TRT_WHL} ${PACKAGES_URL}/${TORCH2TRT_WHL} && \
-    python -m pip install --no-cache-dir tensorrt && \
+    python -m pip install --no-cache-dir tensorrt~=8.6 && \
     python -m pip install --no-cache-dir ${TORCH2TRT_WHL} && \
     python -m pip install --no-cache-dir nvidia-pyindex && \
     python -m pip install --no-cache-dir onnx-graphsurgeon && \
@@ -305,15 +305,30 @@ RUN apt-get update && \
     openmpi-common \
     libomp-dev \
     libjpeg-dev \
+    libpng-dev \
     zlib1g-dev \
+    cmake \
     && rm -rf /var/lib/apt/lists/* \
     && apt-get clean
 
 # torch, torchvision, torchaudio
-COPY --from=dustynv/l4t-pytorch:r36.2.0 /usr/local/cuda-12.2/targets/aarch64-linux/lib/libcupti.* /usr/local/cuda-12.2/targets/aarch64-linux/lib/
-COPY --from=dustynv/l4t-pytorch:r36.2.0 /opt/torch*.whl /opt/
-RUN python -m pip install --no-cache-dir /opt/torch*.whl && \
-    rm /opt/torch*.whl
+COPY --from=builder /usr/local/cuda-12.2/targets/aarch64-linux/lib/libcupti.* /usr/local/cuda-12.2/targets/aarch64-linux/lib/
+# https://forums.developer.nvidia.com/t/pytorch-for-jetson/72048
+ARG PYTORCH_URL=https://nvidia.box.com/shared/static/mp164asf3sceb570wvjsrezk1p4ftj8t.whl
+ARG PYTORCH_WHL=torch-2.3.0-cp310-cp310-linux_aarch64.whl
+RUN wget -nv -O ${PYTORCH_WHL} ${PYTORCH_URL} && \
+    python -m pip install --no-cache-dir ${PYTORCH_WHL} && \
+    rm ${PYTORCH_WHL}
+ARG TORCHAUDIO_URL=https://nvidia.box.com/shared/static/9agsjfee0my4sxckdpuk9x9gt8agvjje.whl
+ARG TORCHAUDIO_WHL=torchaudio-2.3.0+952ea74-cp310-cp310-linux_aarch64.whl
+RUN wget -nv -O ${TORCHAUDIO_WHL} ${TORCHAUDIO_URL} && \
+    python -m pip install --no-cache-dir ${TORCHAUDIO_WHL} && \
+    rm ${TORCHAUDIO_WHL}
+ARG TORCHVISION_URL=https://nvidia.box.com/shared/static/xpr06qe6ql3l6rj22cu3c45tz1wzi36p.whl
+ARG TORCHVISION_WHL=torchvision-0.18.0a0+6043bc2-cp310-cp310-linux_aarch64.whl
+RUN wget -nv -O ${TORCHVISION_WHL} ${TORCHVISION_URL} && \
+    python -m pip install --no-cache-dir ${TORCHVISION_WHL} && \
+    rm ${TORCHVISION_WHL}
 
 # set the CUDA architectures that torch extensions get built for
 # Nano/TX1 = 5.3, TX2 = 6.2, Xavier = 7.2, Orin = 8.7
@@ -327,7 +342,7 @@ ARG TORCH2TRT_WHL="torch2trt-${TORCH2TRT_VERSION}-py3-none-any.whl"
 RUN wget -nv -P /usr/local/lib/ ${PACKAGES_URL}/libtorch2trt_plugins.so && \
     ldconfig && \
     wget -nv -O ${TORCH2TRT_WHL} ${PACKAGES_URL}/${TORCH2TRT_WHL} && \
-    python -m pip install --no-cache-dir tensorrt && \
+    python -m pip install --no-cache-dir tensorrt~=8.6 && \
     python -m pip install --no-cache-dir ${TORCH2TRT_WHL} && \
     python -m pip install --no-cache-dir nvidia-pyindex && \
     python -m pip install --no-cache-dir onnx-graphsurgeon && \
diff --git a/docs/source/advanced_topics/0_extra_image.rst b/docs/source/advanced_topics/0_extra_image.rst
index 8f3aeeaf1..1583d36fa 100644
--- a/docs/source/advanced_topics/0_extra_image.rst
+++ b/docs/source/advanced_topics/0_extra_image.rst
@@ -31,23 +31,23 @@ Extra component versions
      - Jetson Version
      - Notes
    * - PyTorch
-     - 2.2.2
-     - 2.1.0
+     - 2.3.0
+     - 2.3.0
      - With CUDA support
    * - Torchaudio
-     - 2.2.2
-     - 2.1.0
+     - 2.3.0
+     - 2.3.0
      -
    * - Torchvision
-     - 0.17.2
-     - 0.16.0
+     - 0.18.0
+     - 0.18.0
      -
    * - TensorRT
      - 8.6.1
      - 8.6.2
      -
    * - Torch2TRT
-     - 0.4.0
+     - 0.5.0
      - 0.4.0
      -
    * - ONNX
@@ -63,18 +63,18 @@ Extra component versions
      - 2024.1
      -
    * - Pandas
-     - 2.2.1
-     - 2.2.1
+     - 2.2.2
+     - 2.2.2
      -
    * - Polars
-     - 0.20.18
+     - 0.20.26
      - 0.19.12
      -
    * - Scikit-learn
-     - 1.4.1
-     - 1.4.1
+     - 1.4.2
+     - 1.4.2
      -
    * - JupyterLab
-     - 4.1.5
-     - 4.1.5
+     - 4.2.0
+     - 4.2.0
      -
diff --git a/tests/test_extra.py b/tests/test_extra.py
new file mode 100644
index 000000000..5ca349391
--- /dev/null
+++ b/tests/test_extra.py
@@ -0,0 +1,44 @@
+"""Test extra packages."""
+
+
+def test_extra():
+    """Try to import extra packages and check the version."""
+    import torch
+    print('PyTorch', torch.__version__)
+
+    import torchaudio
+    print('Torchaudio', torchaudio.__version__)
+
+    import torchvision
+    print('Torchvision', torchvision.__version__)
+
+    import tensorrt
+    print('TensorRT', tensorrt.__version__)
+
+    import torch2trt
+    print('Torch2TRT OK')
+
+    import onnx
+    print('ONNX', onnx.__version__)
+
+    import onnxruntime
+    print('ONNX Runtime', onnxruntime.__version__)
+
+    import pycuda
+    print('PyCUDA', pycuda.VERSION_TEXT)
+
+    import pandas
+    print('Pandas', pandas.__version__)
+
+    import polars
+    print('Polars', polars.__version__)
+
+    import sklearn
+    print('Scikit-Learn', sklearn.__version__)
+
+    import jupyterlab
+    print('JupyterLab', jupyterlab.__version__)
+
+
+if __name__ == '__main__':
+    test_extra()
diff --git a/utils/build_torch2trt.sh b/utils/build_torch2trt.sh
index 386609dc6..e9c06515f 100755
--- a/utils/build_torch2trt.sh
+++ b/utils/build_torch2trt.sh
@@ -2,7 +2,7 @@
 # Builds torch2trt from source (amd64/arm64).
 # Requires nvidia runtime to share some host libs with the container on Jetson.
 
-: "${TORCH2TRT_VERSION:=v0.4.0}"
+: "${TORCH2TRT_VERSION:=v0.5.0}"
 : "${OUTPUT_DIR:=/opt}"
 : "${TMP_DIR:=/tmp}"
 
@@ -15,7 +15,7 @@
 sed 's|collections.Sequence|collections.abc.Sequence|g' -i torch2trt/converters/interpolate.py
 #cat torch2trt/converters/interpolate.py | grep Sequence
 
 # install requirements
-python3 -m pip install tensorrt torch packaging
+python3 -m pip install tensorrt~=8.6 torch packaging
 python3 setup.py bdist_wheel
 cp dist/torch2trt*.whl "$OUTPUT_DIR"
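
Note on using the new test (not part of the upstream patch): tests/test_extra.py ends with an `if __name__ == '__main__'` guard, so it can be smoke-run standalone inside the built extra image with `python tests/test_extra.py`, or via pytest as `python -m pytest -s tests/test_extra.py`, assuming pytest is available in the image (this patch does not install it). Each successful import prints the package version, so the first missing or broken extra package shows up directly in the output.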