diff --git a/.build/Dockerfile b/.build/Dockerfile
index c6339d6..294bce4 100755
--- a/.build/Dockerfile
+++ b/.build/Dockerfile
@@ -7,8 +7,8 @@

 # Use NVIDIA CUDA as base image and run the same installation as in the other packages.
 # The version of cudatoolkit must match those of the base image, see Dockerfile.pytorch
-FROM nvidia/cuda:10.2-cudnn8-runtime-ubuntu18.04
-LABEL authors="Christoph Schranz , Mathematical Michael "
+ FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
+ LABEL authors="Christoph Schranz , Mathematical Michael "
 # This is a concatenated Dockerfile, the maintainers of subsequent sections may vary.
 RUN chmod 1777 /tmp && chmod 1777 /var/tmp
@@ -398,22 +398,21 @@ WORKDIR $HOME

 LABEL maintainer="Christoph Schranz "

-# Install Tensorflow, check compatibility here: https://www.tensorflow.org/install/gpu
+# Install Tensorflow, check compatibility here:
+# https://www.tensorflow.org/install/source#gpu
 # installation via conda leads to errors in version 4.8.2
 RUN pip install --upgrade pip && \
     pip install --no-cache-dir "tensorflow==2.3.2" && \
     pip install --no-cache-dir keras

-# Update cudatoolkit and install PyTorch with dependencies
+# Install PyTorch with dependencies
 RUN conda install --quiet --yes \
     pyyaml mkl mkl-include setuptools cmake cffi typing
-# RUN conda install --quiet --yes \
-#     cudatoolkit=10.1 -c pytorch
-# Check compatibility here: https://pytorch.org/get-started/locally/
-# Installation via conda leads to errors installing cudatoolkit=10.2
-RUN conda install pytorch torchvision torchaudio cudatoolkit=10.1 -c pytorch
-# RUN pip install torch torchvision torchaudio torchviz
+# Check compatibility here:
+# https://pytorch.org/get-started/locally/
+# Installation via conda leads to errors installing cudatoolkit=10.1
+RUN pip install torch torchvision torchaudio torchviz

 # Clean installation
 RUN conda clean --all -f -y && \
@@ -436,6 +435,48 @@ RUN git clone https://github.com/Syllo/nvtop.git /run/nvtop && \

 RUN fix-permissions /home/$NB_USER

+USER $NB_UID
+
+ ############################################################################
+ ############################ Useful packages ###############################
+ ############################################################################
+
+LABEL authors="Christoph Schranz , Mathematical Michael "
+
+USER root
+
+RUN pip install --no-cache-dir ipyleaflet "plotly>=4.14.3" "ipywidgets>=7.5"
+
+# Install important packages and Graphviz
+RUN set -ex \
+ && buildDeps=' \
+    graphviz==0.11 \
+' \
+ && apt-get update \
+ && apt-get -y install htop apt-utils iputils-ping graphviz libgraphviz-dev openssh-client \
+ && pip install --no-cache-dir $buildDeps
+
+# Install various extensions
+RUN fix-permissions $CONDA_DIR
+# jupyterlab/github does not support JupyterLab 3.x yet; install a well-maintained alternative instead.
+# RUN jupyter labextension install @jupyterlab/github
+RUN pip install jupyterlab-git
+RUN pip install jupyterlab-drawio
+RUN jupyter nbextension enable --py --sys-prefix ipyleaflet
+RUN jupyter labextension install jupyterlab-plotly
+RUN jupyter labextension install @jupyter-widgets/jupyterlab-manager plotlywidget
+# RUN pip install --no-cache-dir jupyter-tabnine --user && \
+#    jupyter nbextension install --py jupyter_tabnine --user && \
+#    jupyter nbextension enable --py jupyter_tabnine --user && \
+#    jupyter serverextension enable --py jupyter_tabnine --user
+RUN pip install --no-cache-dir jupyter_contrib_nbextensions \
+    jupyter_nbextensions_configurator rise
+# jupyter nbextension enable codefolding/main
+RUN jupyter labextension install @ijmbarr/jupyterlab_spellchecker
+
+RUN fix-permissions /home/$NB_USER
+
+# Switch back to jovyan to avoid accidental container runs as root
 USER $NB_UID

 # Copy jupyter_notebook_config.json
diff --git a/README.md b/README.md
index 9b157fc..50afdd7 100644
--- a/README.md
+++ b/README.md
@@ -26,12 +26,12 @@ The image of this repository is available on [Dockerhub](https://hub.docker.com/
    and [Docker Compose](https://docs.docker.com/compose/install/) version **1.28.0+**.
 3. Get access to your GPU via CUDA drivers within Docker containers.
    You can be sure that you can access your GPU within Docker,
-   if the command `docker run --gpus all nvidia/cuda:10.2-cudnn8-runtime-ubuntu18.04 nvidia-smi`
+   if the command `docker run --gpus all nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 nvidia-smi`
    returns a result similar to this one:
    ```bash
    Fri Feb 26 12:45:19 2021
    +-----------------------------------------------------------------------------+
-   | NVIDIA-SMI 460.39       Driver Version: 460.39       CUDA Version: 10.2     |
+   | NVIDIA-SMI 460.39       Driver Version: 460.39       CUDA Version: 10.1     |
    |-------------------------------+----------------------+----------------------+
    | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
    | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
@@ -54,7 +54,7 @@ The image of this repository is available on [Dockerhub](https://hub.docker.com/
    The CUDA toolkit is not required on the host system, as it will be
    installed within the Docker containers using [NVIDIA-docker](https://github.com/NVIDIA/nvidia-docker).
    It is also important to keep your installed CUDA version in mind, when you pull images.
-   **You can't run images based on `nvidia/cuda:11.2` if you have only CUDA version 10.2 installed.**
+   **You can't run images based on `nvidia/cuda:11.2` if you have only CUDA version 10.1 installed.**
    Check your host's CUDA-version with `nvcc --version` and update to at least
    the same version you want to pull.

@@ -62,12 +62,23 @@ The image of this repository is available on [Dockerhub](https://hub.docker.com/
    environment will be downloaded:
    ```bash
    cd your-working-directory
-   docker run --gpus all -d -it -p 8848:8888 -v $(pwd)/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root cschranz/gpu-jupyter:v1.3_cuda-10.2_ubuntu-18.04_python-only
+   docker run --gpus all -d -it -p 8848:8888 -v $(pwd)/data:/home/jovyan/work -e GRANT_SUDO=yes -e JUPYTER_ENABLE_LAB=yes --user root cschranz/gpu-jupyter:v1.3_cuda-10.1_ubuntu-18.04_python-only
    ```
-   This starts an instance with of *GPU-Jupyter* the tag `v1.3_cuda-10.2_ubuntu-18.04_python-only` at [http://localhost:8848](http://localhost:8848) (port `8484`).
+   This starts an instance of *GPU-Jupyter* with the tag `v1.3_cuda-10.1_ubuntu-18.04_python-only` at [http://localhost:8848](http://localhost:8848) (port `8848`).
    The default password is `gpu-jupyter` (previously `asdf`) which should be changed as described [below](#set-password).
    Furthermore, data within the host's `data` directory is shared with the container.
-   Other versions of GPU-Jupyter are available and listed on Dockerhub under [Tags](https://hub.docker.com/r/cschranz/gpu-jupyter/tags?page=1&ordering=last_updated).
+   The following images of GPU-Jupyter are available on Dockerhub:
+   - `v1.3_cuda-10.1_ubuntu-18.04` (full image)
+   - `v1.3_cuda-10.1_ubuntu-18.04_python-only` (only with a Python interpreter and without Julia and R)
+   - `v1.3_cuda-10.1_ubuntu-18.04_slim` (only with a Python interpreter and without additional packages)
+   - `v1.3_cuda-11.0_ubuntu-18.04` (full image)
+   - `v1.3_cuda-11.0_ubuntu-18.04_python-only` (only with a Python interpreter and without Julia and R)
+   - `v1.3_cuda-11.0_ubuntu-18.04_slim` (only with a Python interpreter and without additional packages)
+
+   The version, e.g. `v1.3`, specifies a certain commit of the underlying docker-stacks.
+   The CUDA version, e.g. `cuda-10.1`, has to be compatible with the host's driver version
+   and must be supported by the GPU libraries.
+   These and older versions of GPU-Jupyter are listed on [Dockerhub](https://hub.docker.com/r/cschranz/gpu-jupyter/tags?page=1&ordering=last_updated).

    Within the Jupyterlab instance, you can check if you can access your GPU by opening a new
    terminal window and running
@@ -81,7 +92,7 @@ If you want to learn more about Jupyterlab, check out this [tutorial](https://ww
 First, it is necessary to generate the `Dockerfile` in `.build`, that is based on
 the NVIDIA base image and the [docker-stacks](https://github.com/jupyter/docker-stacks).
 As soon as you have access to your GPU within Docker containers
-(make sure the command `docker run --gpus all nvidia/cuda:10.2-cudnn8-runtime-ubuntu18.04 nvidia-smi`
+(make sure the command `docker run --gpus all nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04 nvidia-smi`
 shows your GPU statistics), you can generate the Dockerfile, build and run it.
 The following commands will start *GPU-Jupyter* on [localhost:8848](http://localhost:8848)
 with the default password `gpu-jupyter` (previously `asdf`).
diff --git a/build_push_all.sh b/build_push_all.sh
index 5f55cf8..b494658 100755
--- a/build_push_all.sh
+++ b/build_push_all.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 cd $(cd -P -- "$(dirname -- "$0")" && pwd -P)

-export TAGNAME="v1.3_cuda-10.2_ubuntu-18.04"
+export TAGNAME="v1.3_cuda-10.1_ubuntu-18.04"

 ###################### build, run and push full image ##########################

diff --git a/src/Dockerfile.gpulibs b/src/Dockerfile.gpulibs
index 25f7daa..143de2e 100644
--- a/src/Dockerfile.gpulibs
+++ b/src/Dockerfile.gpulibs
@@ -1,6 +1,7 @@
 LABEL maintainer="Christoph Schranz "

-# Install Tensorflow, check compatibility here: https://www.tensorflow.org/install/gpu
+# Install Tensorflow, check compatibility here:
+# https://www.tensorflow.org/install/source#gpu
 # installation via conda leads to errors in version 4.8.2
 RUN pip install --upgrade pip && \
     pip install --no-cache-dir "tensorflow==2.3.2" && \
@@ -10,10 +11,10 @@ RUN pip install --upgrade pip && \
 RUN conda install --quiet --yes \
     pyyaml mkl mkl-include setuptools cmake cffi typing

-# Check compatibility here: https://pytorch.org/get-started/locally/
-# Installation via conda leads to errors installing cudatoolkit=10.2
-RUN conda install pytorch torchvision torchaudio cudatoolkit=10.1 -c pytorch
-# RUN pip install torch torchvision torchaudio torchviz
+# Check compatibility here:
+# https://pytorch.org/get-started/locally/
+# Installation via conda leads to errors installing cudatoolkit=10.1
+RUN pip install torch torchvision torchaudio torchviz

 # Clean installation
 RUN conda clean --all -f -y && \
diff --git a/src/Dockerfile.header b/src/Dockerfile.header
index 1b69288..293e595 100644
--- a/src/Dockerfile.header
+++ b/src/Dockerfile.header
@@ -1,6 +1,6 @@
 # Use NVIDIA CUDA as base image and run the same installation as in the other packages.
 # The version of cudatoolkit must match those of the base image, see Dockerfile.pytorch
-FROM nvidia/cuda:10.2-cudnn8-runtime-ubuntu18.04
-LABEL authors="Christoph Schranz , Mathematical Michael "
+ FROM nvidia/cuda:10.1-cudnn7-runtime-ubuntu18.04
+ LABEL authors="Christoph Schranz , Mathematical Michael "
 # This is a concatenated Dockerfile, the maintainers of subsequent sections may vary.
 RUN chmod 1777 /tmp && chmod 1777 /var/tmp
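
A quick way to sanity-check the downgraded base image and the pip-installed GPU libraries after building or pulling one of the tags above is to query both frameworks from the host. This is a minimal sketch and not part of the diff; the container name `gpu-jupyter` is hypothetical, so substitute the container ID shown by `docker ps`.

```bash
# Hypothetical container name -- replace with the ID from `docker ps`.
CONTAINER=gpu-jupyter

# Driver and CUDA runtime visible inside the container
docker exec "$CONTAINER" nvidia-smi

# TensorFlow 2.3.2 should list at least one physical GPU
docker exec "$CONTAINER" python -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"

# PyTorch should report CUDA as available
docker exec "$CONTAINER" python -c "import torch; print(torch.cuda.is_available())"
```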