$ docker build -t [image_name]:[tag] .
$ docker run --name [container_name] [image_name]:[tag]
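For example, with a hypothetical image and container name:
$ docker build -t myapp:latest .
$ docker run --name myapp_dev myapp:latest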
$ docker logs -f [container_id_or_name]
$ docker exec -it [container_id_or_name] bash
$ docker ps
$ docker ps -a
$ docker images
$ docker stop [container_id_or_name]
$ docker rm [container_id_or_name]
$ docker rmi [image_id_or_name]
$ docker run -it --rm -p 8080:8080 image
$ jupyter notebook --ip 0.0.0.0 --no-browser --allow-root
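The Jupyter command above is typically run inside a container; a sketch that also publishes Jupyter's default port 8888 (assumes Jupyter is installed in the image):
$ docker run -it --rm -p 8888:8888 [image_name]:[tag] jupyter notebook --ip 0.0.0.0 --no-browser --allow-root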
Copy a file from the host into a container
$ docker cp /file/location/on/host [container_id]:/file/location/in/container
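A sketch with hypothetical file paths and container name; docker cp works in both directions:
$ docker cp ./model.pt my_container:/workspace/model.pt
$ docker cp my_container:/workspace/results.csv ./results.csv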
The NVIDIA Container Toolkit (nvidia-docker) needs to be installed to use GPUs inside containers
Nvidia-docker: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker
$ docker run --runtime=nvidia -it --rm tensorflow/tensorflow:devel-gpu-py3 bash
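On Docker 19.03 and newer with the NVIDIA Container Toolkit installed, the --gpus flag can be used instead of --runtime=nvidia:
$ docker run --gpus all -it --rm tensorflow/tensorflow:devel-gpu-py3 bash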
Create a bind-type volume from the terminal
$ docker volume create --driver local --opt type=none --opt device=/home/user/test --opt o=bind test_vol
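To mount the volume created above into a container (the container path /data is just an example):
$ docker run -it --rm -v test_vol:/data [image_name]:[tag]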
Create a volume in a docker-compose file
volumes:
  bind-test:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: /home/user/test
NOTE: Docker Compose does not create the bound directory on the host for you. Make sure the directory exists, otherwise the mount will fail
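For reference, a service in the same compose file could mount the volume like this (service name, image, and container path are hypothetical):
services:
  app:
    image: [image_name]:[tag]
    volumes:
      - bind-test:/data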
Run a GUI application from a container by forwarding the X11 display
$ xhost + && docker run --rm -ti --net=host --ipc=host -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix --device /dev/dri:/dev/dri -v [host_dir]:[container_dir] [image_name]:[tag]
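Note that xhost + disables X server access control for all clients; re-enable it when finished:
$ xhost -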
First find the container_id from docker ps, then commit the container as a new image (the tag is optional)
$ docker commit [container_id] [image_name]:[tag]
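For example, with a hypothetical container id and image name:
$ docker commit 3f2a1b7c9d my_image:snapshot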
$ docker tag [image_name] [docker_hub_username/image_name]
$ docker push [docker_hub_username/image_name]
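A concrete sketch with a hypothetical Docker Hub username and image; docker login is required before pushing:
$ docker login
$ docker tag myapp:latest jdoe/myapp:latest
$ docker push jdoe/myapp:latest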
$ docker run -it -v [absolute_path_to_our_files]:/[terminal_dir_name] --rm [image_name]
$ docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' <container_name_or_id>
$ docker rmi $(docker images -f dangling=true -q )
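Equivalently, the prune command removes dangling images:
$ docker image prune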
$ docker-compose down -v
$ docker rm $(docker ps --no-trunc -aq)
$ docker save -o <tar_filename>.tar <image_name:tag>
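To load the saved image back, e.g. on another machine:
$ docker load -i <tar_filename>.tar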