diff --git a/README.md b/README.md index 249d06d..709cdbf 100644 --- a/README.md +++ b/README.md @@ -41,9 +41,8 @@ recursive layouts. Disko doesn't require installation: it can be run directly from nix-community repository. The [Quickstart Guide](./docs/quickstart.md) documents how to run -Disko in its simplest form when installing NixOS. Alternatively, you can -also use the new [disko-install](./docs/disko-install.md) tool, which combines -the `disko` and `nixos-install` into one step. +Disko in its simplest form when installing NixOS. Alternatively, you can also +use the new [disko-install](./docs/disko-install.md) tool, which combines `disko` and `nixos-install` into one step. For information on other use cases, including upgrading from an older version of **disko**, using **disko** without NixOS and downloading the module, see the diff --git a/docs/interactive-vm.md b/docs/interactive-vm.md new file mode 100644 index 0000000..951ea3a --- /dev/null +++ b/docs/interactive-vm.md @@ -0,0 +1,17 @@ +# Running Interactive VMs with disko + +disko now exports its own flavor of interactive VMs (similar to config.system.build.vm). +Simply import the disko module and build the VM runner with: +``` +nix build -L '.#nixosConfigurations.mymachine.config.system.build.vmWithDisko' +``` + +Afterwards, you can run the interactive VM with: + +``` +result/bin/disko-vm +``` + +extraConfig that is set in disko.tests.extraConfig is also applied to the interactive VMs. +imageSize of the VMs will be determined by the imageSize in the disk type in your disko config. +memorySize is set by disko.memSize diff --git a/docs/quickstart.md b/docs/quickstart.md index b4db199..084c057 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -15,7 +15,8 @@ existing partitions. Dual booting with other operating systems is not supported. ### Step 1: Choose a Disk Configuration -Real-world templates are provided in this [repository](https://github.com/nix-community/disko-templates). 
+Real-world templates are provided in this +[repository](https://github.com/nix-community/disko-templates). More disk layouts for all filesystems can be also found in the [example](https://github.com/nix-community/disko/tree/master/example) directory diff --git a/lib/default.nix b/lib/default.nix index f4337f9..58467f6 100644 --- a/lib/default.nix +++ b/lib/default.nix @@ -16,6 +16,8 @@ let # a version of makeDiskImage which runs outside of the store makeDiskImagesScript = args: (import ./make-disk-image.nix ({ inherit diskoLib; } // args)).impure; + makeVMRunner = args: (import ./interactive-vm.nix ({ inherit diskoLib; } // args)).pure; + testLib = import ./tests.nix { inherit lib makeTest eval-config; }; # like lib.types.oneOf but instead of a list takes an attrset # uses the field "type" to find the correct type in the attrset diff --git a/lib/interactive-vm.nix b/lib/interactive-vm.nix new file mode 100644 index 0000000..ba20987 --- /dev/null +++ b/lib/interactive-vm.nix @@ -0,0 +1,88 @@ +{ nixosConfig +, diskoLib +, pkgs ? nixosConfig.pkgs +, name ? "${nixosConfig.config.networking.hostName}-disko-images" +, extraConfig ? 
{ } +}: +let + lib = pkgs.lib; + vm_disko = (diskoLib.testLib.prepareDiskoConfig nixosConfig.config diskoLib.testLib.devices).disko; + cfg_ = (lib.evalModules { + modules = lib.singleton { + # _file = toString input; + imports = lib.singleton { disko.devices = vm_disko.devices; }; + options = { + disko.devices = lib.mkOption { + type = diskoLib.toplevel; + }; + disko.testMode = lib.mkOption { + type = lib.types.bool; + default = true; + }; + }; + }; + }).config; + disks = lib.attrValues cfg_.disko.devices.disk; + diskoImages = diskoLib.makeDiskImages { + nixosConfig = nixosConfig; + copyNixStore = false; + extraConfig = { + disko.devices = cfg_.disko.devices; + }; + testMode = true; + }; + rootDisk = { + name = "root"; + file = ''"$tmp"/${(builtins.head disks).name}.qcow2''; + driveExtraOpts.cache = "writeback"; + driveExtraOpts.werror = "report"; + deviceExtraOpts.bootindex = "1"; + deviceExtraOpts.serial = "root"; + }; + otherDisks = map + (disk: { + name = disk.name; + file = ''"$tmp"/${disk.name}.qcow2''; + driveExtraOpts.werror = "report"; + }) + (builtins.tail disks); + vm = (nixosConfig.extendModules { + modules = [ + ({ modulesPath, ... 
}: { + imports = [ + (modulesPath + "/virtualisation/qemu-vm.nix") + ]; + }) + { + virtualisation.useEFIBoot = nixosConfig.config.disko.tests.efi; + virtualisation.memorySize = nixosConfig.config.disko.memSize; + virtualisation.useDefaultFilesystems = false; + virtualisation.diskImage = null; + virtualisation.qemu.drives = [ rootDisk ] ++ otherDisks; + boot.zfs.devNodes = "/dev/disk/by-uuid"; # needed because /dev/disk/by-id is empty in qemu-vms + boot.zfs.forceImportAll = true; + } + { + # generated from disko config + virtualisation.fileSystems = cfg_.disko.devices._config.fileSystems; + boot = cfg_.disko.devices._config.boot or { }; + swapDevices = cfg_.disko.devices._config.swapDevices or [ ]; + } + nixosConfig.config.disko.tests.extraConfig + ]; + }).config.system.build.vm; +in +{ + pure = pkgs.writeDashBin "disko-vm" '' + set -efux + export tmp=$(mktemp -d) + trap 'rm -rf "$tmp"' EXIT + ${lib.concatMapStringsSep "\n" (disk: '' + ${pkgs.qemu}/bin/qemu-img create -f qcow2 \ + -b ${diskoImages}/${disk.name}.raw \ + -F raw "$tmp"/${disk.name}.qcow2 + '') disks} + set +f + ${vm}/bin/run-*-vm + ''; +} diff --git a/lib/make-disk-image.nix b/lib/make-disk-image.nix index c03676e..de9362b 100644 --- a/lib/make-disk-image.nix +++ b/lib/make-disk-image.nix @@ -5,6 +5,9 @@ , name ? "${nixosConfig.config.networking.hostName}-disko-images" , extraPostVM ? nixosConfig.config.disko.extraPostVM , checked ? false +, copyNixStore ? true +, testMode ? false +, extraConfig ? 
{ } }: let vmTools = pkgs.vmTools.override { @@ -16,10 +19,14 @@ let }; cleanedConfig = diskoLib.testLib.prepareDiskoConfig nixosConfig.config diskoLib.testLib.devices; systemToInstall = nixosConfig.extendModules { - modules = [{ - disko.devices = lib.mkForce cleanedConfig.disko.devices; - boot.loader.grub.devices = lib.mkForce cleanedConfig.boot.loader.grub.devices; - }]; + modules = [ + extraConfig + { + disko.testMode = true; + disko.devices = lib.mkForce cleanedConfig.disko.devices; + boot.loader.grub.devices = lib.mkForce cleanedConfig.boot.loader.grub.devices; + } + ]; }; dependencies = with pkgs; [ bash @@ -49,6 +56,7 @@ let rootPaths = [ systemToInstall.config.system.build.toplevel ]; }; partitioner = '' + set -efux # running udev, stolen from stage-1.sh echo "running udev..." ln -sfn /proc/self/fd /dev/fd @@ -64,10 +72,13 @@ let udevadm trigger --action=add udevadm settle + ${lib.optionalString testMode '' + export IN_DISKO_TEST=1 + ''} ${systemToInstall.config.system.build.diskoScript} ''; - installer = '' + installer = lib.optionalString copyNixStore '' # populate nix db, so nixos-install doesn't complain export NIX_STATE_DIR=${systemToInstall.config.disko.rootMountPoint}/nix/var/nix nix-store --load-db < "${closureInfo}/registration" diff --git a/lib/types/gpt.nix b/lib/types/gpt.nix index c9947a0..2042a20 100644 --- a/lib/types/gpt.nix +++ b/lib/types/gpt.nix @@ -45,14 +45,15 @@ in }; priority = lib.mkOption { type = lib.types.int; - default = if partition.config.size or "" == "100%" then - 9001 - else if partition.config.type == "EF02" then + default = + if partition.config.size or "" == "100%" then + 9001 + else if partition.config.type == "EF02" then # Boot partition should be created first, because some BIOS implementations require it. 
# Priority defaults to 100 here to support any potential use-case for placing partitions prior to EF02 - 100 - else - 1000; + 100 + else + 1000; defaultText = '' 1000: normal partitions 9001: partitions with 100% size diff --git a/lib/types/luks.nix b/lib/types/luks.nix index b75cb9a..dd89f37 100644 --- a/lib/types/luks.nix +++ b/lib/types/luks.nix @@ -114,19 +114,23 @@ in default = '' if ! blkid "${config.device}" >/dev/null || ! (blkid "${config.device}" -o export | grep -q '^TYPE='); then ${lib.optionalString config.askPassword '' - set +x askPassword() { - echo "Enter password for ${config.device}: " - IFS= read -r -s password - echo "Enter password for ${config.device} again to be safe: " - IFS= read -r -s password_check - export password - [ "$password" = "$password_check" ] + if [ -z ''${IN_DISKO_TEST+x} ]; then + set +x + echo "Enter password for ${config.device}: " + IFS= read -r -s password + echo "Enter password for ${config.device} again to be safe: " + IFS= read -r -s password_check + export password + [ "$password" = "$password_check" ] + set -x + else + export password=disko + fi } until askPassword; do echo "Passwords did not match, please try again." done - set -x ''} cryptsetup -q luksFormat ${config.device} ${toString config.extraFormatArgs} ${keyFileArgs} ${cryptsetupOpen} --persistent @@ -147,11 +151,15 @@ in dev = '' if ! 
cryptsetup status ${config.name} >/dev/null 2>/dev/null; then ${lib.optionalString config.askPassword '' - set +x - echo "Enter password for ${config.device}" - IFS= read -r -s password - export password - set -x + if [ -z ''${IN_DISKO_TEST+x} ]; then + set +x + echo "Enter password for ${config.device}" + IFS= read -r -s password + export password + set -x + else + export password=disko + fi ''} ${cryptsetupOpen} fi diff --git a/lib/types/lvm_vg.nix b/lib/types/lvm_vg.nix index b64caf0..29360db 100644 --- a/lib/types/lvm_vg.nix +++ b/lib/types/lvm_vg.nix @@ -3,7 +3,7 @@ let # Load kernel modules to ensure device mapper types are available kernelModules = lib.filter (x: x != "") (map (lv: lib.optionalString (lv.lvm_type != null && lv.lvm_type != "thinlv") "dm-${lv.lvm_type}") - (lib.attrValues config.lvs)); + (lib.attrValues config.lvs)); in { options = { @@ -125,7 +125,7 @@ in _config = lib.mkOption { internal = true; readOnly = true; - default = [ { boot.initrd.kernelModules = kernelModules; } ] ++ + default = [{ boot.initrd.kernelModules = kernelModules; }] ++ map (lv: [ (lib.optional (lv.content != null) lv.content._config) diff --git a/lib/types/zfs_fs.nix b/lib/types/zfs_fs.nix index e05f91a..813d75a 100644 --- a/lib/types/zfs_fs.nix +++ b/lib/types/zfs_fs.nix @@ -59,40 +59,42 @@ # important to prevent accidental shadowing of mount points # since (create order != mount order) # -p creates parents automatically - default = let - createOptions = (lib.optionalAttrs (config.mountpoint != null) { mountpoint = config.mountpoint; }) // config.options; - # All options defined as PROP_ONETIME or PROP_ONETIME_DEFAULT in https://github.com/openzfs/zfs/blob/master/module/zcommon/zfs_prop.c - onetimeProperties = [ - "encryption" - "casesensitivity" - "utf8only" - "normalization" - "volblocksize" - "pbkdf2iters" - "pbkdf2salt" - "keyformat" - ]; - updateOptions = builtins.removeAttrs config.options onetimeProperties; - mountpoint = config.options.mountpoint or 
config.mountpoint; - in '' - if ! zfs get type ${config._name} >/dev/null 2>&1; then - zfs create -up ${config._name} \ - ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "-o ${n}=${v}") (createOptions))} - ${lib.optionalString (updateOptions != {}) '' - else - zfs set ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${v}") updateOptions)} ${config._name} - ${lib.optionalString (mountpoint != null) '' - # zfs will try unmount the dataset to change the mountpoint - # but this might fail if the dataset is in use - if ! zfs set mountpoint=${mountpoint} ${config._name}; then - echo "Failed to set mountpoint to '${mountpoint}' for ${config._name}." >&2 - echo "You may need to run when the pool is not mounted i.e. in a recovery system:" >&2 - echo " zfs set mountpoint=${mountpoint} ${config._name}" >&2 - fi + default = + let + createOptions = (lib.optionalAttrs (config.mountpoint != null) { mountpoint = config.mountpoint; }) // config.options; + # All options defined as PROP_ONETIME or PROP_ONETIME_DEFAULT in https://github.com/openzfs/zfs/blob/master/module/zcommon/zfs_prop.c + onetimeProperties = [ + "encryption" + "casesensitivity" + "utf8only" + "normalization" + "volblocksize" + "pbkdf2iters" + "pbkdf2salt" + "keyformat" + ]; + updateOptions = builtins.removeAttrs config.options onetimeProperties; + mountpoint = config.options.mountpoint or config.mountpoint; + in + '' + if ! zfs get type ${config._name} >/dev/null 2>&1; then + zfs create -up ${config._name} \ + ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "-o ${n}=${v}") (createOptions))} + ${lib.optionalString (updateOptions != {}) '' + else + zfs set ${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${v}") updateOptions)} ${config._name} + ${lib.optionalString (mountpoint != null) '' + # zfs will try unmount the dataset to change the mountpoint + # but this might fail if the dataset is in use + if ! 
zfs set mountpoint=${mountpoint} ${config._name}; then + echo "Failed to set mountpoint to '${mountpoint}' for ${config._name}." >&2 + echo "You may need to run when the pool is not mounted i.e. in a recovery system:" >&2 + echo " zfs set mountpoint=${mountpoint} ${config._name}" >&2 + fi + ''} ''} - ''} - fi - ''; + fi + ''; } // { readOnly = false; }; _mount = diskoLib.mkMountOption { diff --git a/module.nix b/module.nix index c9feff1..7b058a0 100644 --- a/module.nix +++ b/module.nix @@ -99,6 +99,15 @@ in type = lib.types.bool; default = false; }; + testMode = lib.mkOption { + internal = true; + description = '' + this is true if the system is being run in test mode. + like a vm test or an interactive vm + ''; + type = lib.types.bool; + default = false; + }; tests = { efi = lib.mkOption { description = '' @@ -151,6 +160,11 @@ in extraSystemConfig = cfg.tests.extraConfig; extraTestScript = cfg.tests.extraChecks; }; + + vmWithDisko = diskoLib.makeVMRunner { + inherit pkgs; + nixosConfig = args; + }; };