diff --git a/modules/common/default.nix b/modules/common/default.nix index 26011fa03..68b182ae6 100644 --- a/modules/common/default.nix +++ b/modules/common/default.nix @@ -14,6 +14,7 @@ ./users/accounts.nix ./version ./virtualization/docker.nix + ./virtualization/podman.nix ./systemd ./services ./networking diff --git a/modules/common/development/cuda.nix b/modules/common/development/cuda.nix new file mode 100644 index 000000000..d2fff1e69 --- /dev/null +++ b/modules/common/development/cuda.nix @@ -0,0 +1,24 @@ +# Copyright 2022-2024 TII (SSRC) and the Ghaf contributors +# SPDX-License-Identifier: Apache-2.0 +{ lib, config, ... }: +let + cfg = config.ghaf.development.cuda; + inherit (lib) mkEnableOption mkIf; +in +{ + options.ghaf.development.cuda = { + enable = mkEnableOption "CUDA Support"; + }; + + config = mkIf cfg.enable { + #Enabling CUDA on any supported system requires below settings. + nixpkgs.config.allowUnfree = lib.mkForce true; + nixpkgs.config.allowBroken = lib.mkForce false; + nixpkgs.config.cudaSupport = lib.mkForce true; + + # Enable Opengl + # Opengl enable is renamed to hardware.graphics.enable + # This is needed for CUDA so set it if it is already not set + hardware.graphics.enable = lib.mkForce true; + }; +} diff --git a/modules/common/development/default.nix b/modules/common/development/default.nix index 55620bf2a..86abaedac 100644 --- a/modules/common/development/default.nix +++ b/modules/common/development/default.nix @@ -6,5 +6,6 @@ ./usb-serial.nix ./nix.nix ./ssh.nix + ./cuda.nix ]; } diff --git a/modules/common/virtualization/docker.nix b/modules/common/virtualization/docker.nix index 6b9fa77e2..4fd7163f3 100644 --- a/modules/common/virtualization/docker.nix +++ b/modules/common/virtualization/docker.nix @@ -11,10 +11,65 @@ in }; config = mkIf cfg.enable { - virtualisation.docker.enable = true; - virtualisation.docker.rootless = { + # Just ensure containers are enabled by boot. 
+ boot.enableContainers = lib.mkForce true; + + # Enable Opengl renamed to hardware.graphics.enable + hardware.graphics.enable = lib.mkForce true; + + # For CUDA support unfree libraries and CudaSupport should be set + ghaf.development.cuda.enable = lib.mkForce true; + + # Enabling CDI NVIDIA devices in podman or docker (nvidia docker container) + # For Orin devices this setting does not work as jetpack-nixos still does not support them. + # jetpack-nixos uses enableNvidia = true; even though it is deprecated + # For x86_64 the case is different it was introduced to be + # virtualisation.containers.cdi.dynamic.nvidia.enable = true; + # but deprecated and changed to hardware.nvidia-container-toolkit.enable + # We enable below setting if architecture is x86_64 and if the video driver is nvidia set it true + hardware.nvidia-container-toolkit.enable = lib.mkIf ( + config.nixpkgs.localSystem.isx86_64 && (builtins.elem "nvidia" config.services.xserver.videoDrivers) + ) true; + + # Docker Daemon Settings + virtualisation.docker = { + # To force Docker package version settings need to import pkgs first + # package = pkgs.docker_26; + enable = true; - setSocketVariable = true; + # The enableNvidia option is still used in jetpack-nixos while it is obsolete in nixpkgs + # but it is still the only option for nvidia-orin devices. Added extra fix for CDI to + # make it run with docker. 
+ enableNvidia = config.nixpkgs.localSystem.isAarch64 && config.hardware.nvidia-jetpack.enable; + daemon.settings.features.cdi = true; + rootless = { + enable = true; + setSocketVariable = true; + daemon.settings.features.cdi = true; + daemon.settings.cdi-spec-dirs = [ "/var/run/cdi/" ]; + }; + + # Container file and processor limits + # daemon.settings = { + # default-ulimits = { + # nofile = { + # Name = "nofile"; + # Hard = 1024; + # Soft = 1024; + # }; + # nproc = { + # Name = "nproc"; + # Soft = 65536; + # Hard = 65536; + # }; + # }; + # }; }; + + # Add user to docker group and dialout group for access to serial ports + users.users."ghaf".extraGroups = [ + "docker" + "dialout" + ]; }; } diff --git a/modules/common/virtualization/podman.nix b/modules/common/virtualization/podman.nix new file mode 100644 index 000000000..26b00ee50 --- /dev/null +++ b/modules/common/virtualization/podman.nix @@ -0,0 +1,68 @@ +# Copyright 2022-2024 TII (SSRC) and the Ghaf contributors +# SPDX-License-Identifier: Apache-2.0 +{ lib, config, ... }: +let + cfg = config.ghaf.virtualization.podman.daemon; + inherit (lib) mkEnableOption mkIf; +in +{ + options.ghaf.virtualization.podman.daemon = { + enable = mkEnableOption "Podman Daemon"; + }; + config = mkIf cfg.enable { + # Just ensure containers are enabled by boot. + boot.enableContainers = lib.mkForce true; + + # Enable Opengl renamed to hardware.graphics.enable + hardware.graphics.enable = lib.mkForce true; + + # For CUDA support: Enable if not already enabled. + ghaf.development.cuda.enable = lib.mkForce true; + + # Enabling CDI NVIDIA devices in podman or docker (nvidia docker container) + # For Orin devices this setting does not work as jetpack-nixos still does not support them. 
+ # jetpack-nixos uses enableNvidia = true; even though it is deprecated + # For x86_64 the case is different it was introduced to be + # virtualisation.containers.cdi.dynamic.nvidia.enable = true; + # but deprecated and changed to hardware.nvidia-container-toolkit.enable + # We enable below setting if architecture is x86_64 and if the video driver is nvidia set it true + hardware.nvidia-container-toolkit.enable = lib.mkIf ( + config.nixpkgs.localSystem.isx86_64 && (builtins.elem "nvidia" config.services.xserver.videoDrivers) + ) true; + + virtualisation.podman = { + enable = true; + # The enableNvidia option is still used in jetpack-nixos while it is obsolete in nixpkgs + # but it is still the only option for nvidia-orin devices. + enableNvidia = config.nixpkgs.localSystem.isAarch64 && config.hardware.nvidia-jetpack.enable; + # Create a `docker` alias for podman, to use it as a drop-in replacement + dockerCompat = !config.virtualisation.docker.enable; + dockerSocket.enable = !config.virtualisation.docker.enable; + # Required for containers under podman-compose to be able to talk to each other. 
+ defaultNetwork.settings.dns_enabled = true; + # Container file and processor limits + # daemon.settings = { + # default-ulimits = { + # nofile = { + # Name = "nofile"; + # Hard = 1024; + # Soft = 1024; + # }; + # nproc = { + # Name = "nproc"; + # Soft = 65536; + # Hard = 65536; + # }; + # }; + # }; + }; + + # Add user to podman and docker group (due to compatibility mode) + # and dialout group for access to serial ports + users.users."ghaf".extraGroups = [ + "docker" + "dialout" + "podman" + ]; + }; +} diff --git a/targets/nvidia-jetson-orin/flake-module.nix b/targets/nvidia-jetson-orin/flake-module.nix index b98b244f8..a10c535e5 100644 --- a/targets/nvidia-jetson-orin/flake-module.nix +++ b/targets/nvidia-jetson-orin/flake-module.nix @@ -31,7 +31,7 @@ let # For WLAN firmwares hardware = { enableRedistributableFirmware = som == "agx"; - wirelessRegulatoryDatabase = true; + wirelessRegulatoryDatabase = som == "agx"; }; services.dnsmasq.settings.dhcp-option = [ @@ -58,6 +58,9 @@ let { ghaf = { + #virtualization.podman.daemon.enable = true; + virtualization.docker.daemon.enable = true; + hardware.nvidia.orin = { enable = true; somType = som;