Merge pull request #1822 from dalto8/zfs-wip

[zfs] Support for installing to root-on-ZFS
Adriaan de Groot 2021-11-17 12:20:34 +01:00 committed by GitHub
commit ece1e338e0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
17 changed files with 978 additions and 43 deletions

View file

@ -127,6 +127,7 @@ sequence:
# - dummyprocess
# - dummypython
- partition
# - zfs
- mount
- unpackfs
- machineid
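# (The commented-out "zfs" entry above is where root-on-ZFS support hooks in: when the
#  module is enabled it runs after partition and before mount, as shown in this sequence.)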

View file

@ -92,6 +92,50 @@ def get_kernel_line(kernel_type):
return ""
def get_zfs_root():
"""
Looks in global storage to find the zfs root
:return: A string containing the path to the zfs root or None if it is not found
"""
zfs = libcalamares.globalstorage.value("zfsDatasets")
if not zfs:
libcalamares.utils.warning("Failed to locate zfs dataset list")
return None
# Find the root dataset
for dataset in zfs:
try:
if dataset["mountpoint"] == "/":
return dataset["zpool"] + "/" + dataset["dsName"]
except KeyError:
# This should be impossible
libcalamares.utils.warning("Internal error handling zfs dataset")
raise
return None
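# A minimal illustrative sketch (not part of the module) of the "zfsDatasets" entries this
# helper walks. The keys are the ones stored by the zfs module; the pool and dataset names
# below are just the defaults from the sample zfs.conf:
#
#     datasets = [
#         {"zpool": "zpcala", "dsName": "ROOT/distro/root", "mountpoint": "/", "canMount": "noauto"},
#         {"zpool": "zpcala", "dsName": "ROOT/distro/home", "mountpoint": "/home", "canMount": True},
#     ]
#
# For data like this, get_zfs_root() returns "zpcala/ROOT/distro/root".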
def is_btrfs_root(partition):
""" Returns True if the partition object refers to a btrfs root filesystem
:param partition: A partition map from global storage
:return: True if btrfs and root, False otherwise
"""
return partition["mountPoint"] == "/" and partition["fs"] == "btrfs"
def is_zfs_root(partition):
""" Returns True if the partition object refers to a zfs root filesystem
:param partition: A partition map from global storage
:return: True if zfs and root, False otherwise
"""
return partition["mountPoint"] == "/" and partition["fs"] == "zfs"
def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, kernel_type):
"""
Creates systemd-boot configuration files based on given parameters.
@ -133,12 +177,22 @@ def create_systemd_boot_conf(install_path, efi_dir, uuid, entry, entry_name, ker
"root=/dev/mapper/"
+ partition["luksMapperName"]]
# systemd-boot with a BTRFS root filesystem needs to be told
# about the root subvolume.
for partition in partitions:
if partition["mountPoint"] == "/" and partition["fs"] == "btrfs":
# systemd-boot with a BTRFS root filesystem needs to be told
# about the root subvolume.
if is_btrfs_root(partition):
kernel_params.append("rootflags=subvol=@")
# zfs needs to be told the location of the root dataset
if is_zfs_root(partition):
zfs_root_path = get_zfs_root()
if zfs_root_path is not None:
kernel_params.append("zfs=" + zfs_root_path)
else:
# Something is really broken if we get to this point
libcalamares.utils.warning("Internal error handling zfs dataset")
raise Exception("Internal zfs data missing, please contact your distribution")
if cryptdevice_params:
kernel_params.extend(cryptdevice_params)
else:
@ -314,6 +368,76 @@ def get_grub_efi_parameters():
return None
def run_grub_mkconfig(partitions, output_file):
"""
Runs grub-mkconfig in the target environment
:param partitions: The partitions list from global storage
:param output_file: A string containing the path of the grub config file to generate
:return:
"""
# zfs needs an environment variable set for grub-mkconfig
if any([is_zfs_root(partition) for partition in partitions]):
check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 " +
libcalamares.job.configuration["grubMkconfig"] + " -o " + output_file])
else:
# The input file /etc/default/grub should already be filled out by the
# grubcfg job module.
check_target_env_call([libcalamares.job.configuration["grubMkconfig"], "-o", output_file])
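# Illustrative effect for a zfs root, assuming the stock bootloader.conf defaults
# (grubMkconfig: grub-mkconfig, grubCfg: /boot/grub/grub.cfg): the zfs branch above
# amounts to running, in the target:
#     sh -c "ZPOOL_VDEV_NAME_PATH=1 grub-mkconfig -o /boot/grub/grub.cfg"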
def run_grub_install(fw_type, partitions, efi_directory=None):
"""
Runs grub-install in the target environment
:param fw_type: A string which is "efi" for UEFI installs. Any other value results in a BIOS install
:param partitions: The partitions list from global storage
:param efi_directory: The path of the efi directory relative to the root of the install
:return:
"""
is_zfs = any([is_zfs_root(partition) for partition in partitions])
# zfs needs an environment variable set for grub
if is_zfs:
check_target_env_call(["sh", "-c", "echo ZPOOL_VDEV_NAME_PATH=1 >> /etc/environment"])
if fw_type == "efi":
efi_bootloader_id = efi_label()
efi_target, efi_grub_file, efi_boot_file = get_grub_efi_parameters()
if is_zfs:
check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 " + libcalamares.job.configuration["grubInstall"]
+ " --target=" + efi_target + " --efi-directory=" + efi_directory
+ " --bootloader-id=" + efi_bootloader_id + " --force"])
else:
check_target_env_call([libcalamares.job.configuration["grubInstall"],
"--target=" + efi_target,
"--efi-directory=" + efi_directory,
"--bootloader-id=" + efi_bootloader_id,
"--force"])
else:
if libcalamares.globalstorage.value("bootLoader") is None:
return
boot_loader = libcalamares.globalstorage.value("bootLoader")
if boot_loader["installPath"] is None:
return
if is_zfs:
check_target_env_call(["sh", "-c", "ZPOOL_VDEV_NAME_PATH=1 "
+ libcalamares.job.configuration["grubInstall"]
+ " --target=i386-pc --recheck --force "
+ boot_loader["installPath"]])
else:
check_target_env_call([libcalamares.job.configuration["grubInstall"],
"--target=i386-pc",
"--recheck",
"--force",
boot_loader["installPath"]])
def install_grub(efi_directory, fw_type):
"""
Installs grub as bootloader, either in pc or efi mode.
@ -321,6 +445,12 @@ def install_grub(efi_directory, fw_type):
:param efi_directory:
:param fw_type:
"""
# get the partition from global storage
partitions = libcalamares.globalstorage.value("partitions")
if not partitions:
libcalamares.utils.warning(_("Failed to install grub, no partitions defined in global storage"))
return
if fw_type == "efi":
libcalamares.utils.debug("Bootloader: grub (efi)")
install_path = libcalamares.globalstorage.value("rootMountPoint")
@ -333,11 +463,7 @@ def install_grub(efi_directory, fw_type):
efi_target, efi_grub_file, efi_boot_file = get_grub_efi_parameters()
check_target_env_call([libcalamares.job.configuration["grubInstall"],
"--target=" + efi_target,
"--efi-directory=" + efi_directory,
"--bootloader-id=" + efi_bootloader_id,
"--force"])
run_grub_install(fw_type, partitions, efi_directory)
# VFAT is weird, see issue CAL-385
install_efi_directory_firmware = (vfat_correct_case(
@ -356,36 +482,21 @@ def install_grub(efi_directory, fw_type):
os.makedirs(install_efi_boot_directory)
# Workaround for some UEFI firmwares
FALLBACK = "installEFIFallback"
libcalamares.utils.debug("UEFI Fallback: " + str(libcalamares.job.configuration.get(FALLBACK, "<unset>")))
if libcalamares.job.configuration.get(FALLBACK, True):
fallback = "installEFIFallback"
libcalamares.utils.debug("UEFI Fallback: " + str(libcalamares.job.configuration.get(fallback, "<unset>")))
if libcalamares.job.configuration.get(fallback, True):
libcalamares.utils.debug(" .. installing '{!s}' fallback firmware".format(efi_boot_file))
efi_file_source = os.path.join(install_efi_directory_firmware,
efi_bootloader_id,
efi_grub_file)
efi_file_target = os.path.join(install_efi_boot_directory,
efi_boot_file)
efi_bootloader_id,
efi_grub_file)
efi_file_target = os.path.join(install_efi_boot_directory, efi_boot_file)
shutil.copy2(efi_file_source, efi_file_target)
else:
libcalamares.utils.debug("Bootloader: grub (bios)")
if libcalamares.globalstorage.value("bootLoader") is None:
return
run_grub_install(fw_type, partitions)
boot_loader = libcalamares.globalstorage.value("bootLoader")
if boot_loader["installPath"] is None:
return
check_target_env_call([libcalamares.job.configuration["grubInstall"],
"--target=i386-pc",
"--recheck",
"--force",
boot_loader["installPath"]])
# The input file /etc/default/grub should already be filled out by the
# grubcfg job module.
check_target_env_call([libcalamares.job.configuration["grubMkconfig"],
"-o", libcalamares.job.configuration["grubCfg"]])
run_grub_mkconfig(partitions, libcalamares.job.configuration["grubCfg"])
def install_secureboot(efi_directory):

View file

@ -196,7 +196,7 @@ class FstabGenerator(object):
dct = self.generate_fstab_line_info(mount_entry)
if dct:
self.print_fstab_line(dct, file=fstab_file)
else:
elif partition["fs"] != "zfs": # zfs partitions don't need an entry in fstab
dct = self.generate_fstab_line_info(partition)
if dct:
self.print_fstab_line(dct, file=fstab_file)

View file

@ -55,6 +55,32 @@ def get_grub_config_path(root_mount_point):
return os.path.join(default_dir, default_config_file)
def get_zfs_root():
"""
Looks in global storage to find the zfs root
:return: A string containing the path to the zfs root or None if it is not found
"""
zfs = libcalamares.globalstorage.value("zfsDatasets")
if not zfs:
libcalamares.utils.warning("Failed to locate zfs dataset list")
return None
# Find the root dataset
for dataset in zfs:
try:
if dataset["mountpoint"] == "/":
return dataset["zpool"] + "/" + dataset["dsName"]
except KeyError:
# This should be impossible
libcalamares.utils.warning("Internal error handling zfs dataset")
raise
return None
def modify_grub_default(partitions, root_mount_point, distributor):
"""
Configures '/etc/default/grub' for hibernation and plymouth.
@ -141,8 +167,15 @@ def modify_grub_default(partitions, root_mount_point, distributor):
)
]
if partition["fs"] == "zfs" and partition["mountPoint"] == "/":
zfs_root_path = get_zfs_root()
kernel_params = ["quiet"]
# Currently, grub doesn't detect this properly so it must be set manually
if zfs_root_path:
kernel_params.insert(0, "zfs=" + zfs_root_path)
if cryptdevice_params:
kernel_params.extend(cryptdevice_params)
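# Illustrative outcome, assuming the default zfs.conf pool layout: the generated
# /etc/default/grub would then carry something like
#     GRUB_CMDLINE_LINUX_DEFAULT="zfs=zpcala/ROOT/distro/root quiet"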

View file

@ -150,6 +150,7 @@ def find_initcpio_features(partitions, root_mount_point):
swap_uuid = ""
uses_btrfs = False
uses_zfs = False
uses_lvm2 = False
encrypt_hook = False
openswap_hook = False
@ -172,6 +173,9 @@ def find_initcpio_features(partitions, root_mount_point):
if partition["fs"] == "btrfs":
uses_btrfs = True
if partition["fs"] == "zfs":
uses_zfs = True
if "lvm2" in partition["fs"]:
uses_lvm2 = True
@ -198,6 +202,9 @@ def find_initcpio_features(partitions, root_mount_point):
if uses_lvm2:
hooks.append("lvm2")
if uses_zfs:
hooks.append("zfs")
if swap_uuid != "":
if encrypt_hook and openswap_hook:
hooks.extend(["openswap"])

View file

@ -26,6 +26,17 @@ _ = gettext.translation("calamares-python",
fallback=True).gettext
class ZfsException(Exception):
"""Exception raised when there is a problem with zfs
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
self.message = message
def pretty_name():
return _("Mounting partitions.")
@ -61,6 +72,70 @@ def get_btrfs_subvolumes(partitions):
return btrfs_subvolumes
def mount_zfs(root_mount_point, partition):
""" Mounts a zfs partition at @p root_mount_point
:param root_mount_point: The absolute path to the root of the install
:param partition: The partition map from global storage for this partition
:return:
"""
# Get the list of zpools from global storage
zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
if not zfs_pool_list:
libcalamares.utils.warning("Failed to locate zfsPoolInfo data in global storage")
raise ZfsException(_("Internal error mounting zfs datasets"))
# Find the zpool matching this partition
for zfs_pool in zfs_pool_list:
if zfs_pool["mountpoint"] == partition["mountPoint"]:
pool_name = zfs_pool["poolName"]
ds_name = zfs_pool["dsName"]
# import the zpool
try:
libcalamares.utils.host_env_process_output(["zpool", "import", "-N", "-R", root_mount_point, pool_name], None)
except subprocess.CalledProcessError:
raise ZfsException(_("Failed to import zpool"))
# Get the encryption information from global storage
zfs_info_list = libcalamares.globalstorage.value("zfsInfo")
encrypt = False
if zfs_info_list:
for zfs_info in zfs_info_list:
if zfs_info["mountpoint"] == partition["mountPoint"] and zfs_info["encrypted"] is True:
encrypt = True
passphrase = zfs_info["passphrase"]
if encrypt is True:
# The zpool is encrypted, we need to unlock it
try:
libcalamares.utils.host_env_process_output(["zfs", "load-key", pool_name], None, passphrase)
except subprocess.CalledProcessError:
raise ZfsException(_("Failed to unlock zpool"))
if partition["mountPoint"] == '/':
# Get the zfs dataset list from global storage
zfs = libcalamares.globalstorage.value("zfsDatasets")
if not zfs:
libcalamares.utils.warning("Failed to locate zfs dataset list")
raise ZfsException(_("Internal error mounting zfs datasets"))
zfs.sort(key=lambda x: x["mountpoint"])
for dataset in zfs:
try:
if dataset["canMount"] == "noauto" or dataset["canMount"] is True:
libcalamares.utils.host_env_process_output(["zfs", "mount",
dataset["zpool"] + '/' + dataset["dsName"]])
except subprocess.CalledProcessError:
raise ZfsException(_("Failed to set zfs mountpoint"))
else:
try:
libcalamares.utils.host_env_process_output(["zfs", "mount", pool_name + '/' + ds_name])
except subprocess.CalledProcessError:
raise ZfsException(_("Failed to set zfs mountpoint"))
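# A sketch (illustrative only) of the "zfsPoolInfo" entries consumed above. The pool names
# assume the sample zfs.conf poolName "zpcala"; a non-root zfs partition (e.g. a separate
# /home) gets the alphanumeric mountpoint appended to the pool name:
#
#     [{"poolName": "zpcala", "mountpoint": "/", "dsName": "none"},
#      {"poolName": "zpcalahome", "mountpoint": "/home", "dsName": "home"}]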
def mount_partition(root_mount_point, partition, partitions):
"""
Do a single mount of @p partition inside @p root_mount_point.
@ -96,11 +171,14 @@ def mount_partition(root_mount_point, partition, partitions):
if "luksMapperName" in partition:
device = os.path.join("/dev/mapper", partition["luksMapperName"])
if libcalamares.utils.mount(device,
mount_point,
fstype,
partition.get("options", "")) != 0:
libcalamares.utils.warning("Cannot mount {}".format(device))
if fstype == "zfs":
mount_zfs(root_mount_point, partition)
else:  # fstype != "zfs"
if libcalamares.utils.mount(device,
mount_point,
fstype,
partition.get("options", "")) != 0:
libcalamares.utils.warning("Cannot mount {}".format(device))
# Special handling for btrfs subvolumes. Create the subvolumes listed in mount.conf
if fstype == "btrfs" and partition["mountPoint"] == '/':
@ -161,8 +239,11 @@ def run():
# under /tmp, we make sure /tmp is mounted before the partition)
mountable_partitions = [ p for p in partitions + extra_mounts if "mountPoint" in p and p["mountPoint"] ]
mountable_partitions.sort(key=lambda x: x["mountPoint"])
for partition in mountable_partitions:
mount_partition(root_mount_point, partition, partitions)
try:
for partition in mountable_partitions:
mount_partition(root_mount_point, partition, partitions)
except ZfsException as ze:
return _("zfs mounting error"), ze.message
libcalamares.globalstorage.insert("rootMountPoint", root_mount_point)

View file

@ -296,7 +296,9 @@ PartitionLayout::createPartitions( Device* dev,
}
Partition* part = nullptr;
if ( luksPassphrase.isEmpty() )
// Encryption for zfs is handled in the zfs module
if ( luksPassphrase.isEmpty() || correctFS( entry.partFileSystem ) == FileSystem::Zfs )
{
part = KPMHelpers::createNewPartition( parent,
*dev,
@ -319,6 +321,24 @@ PartitionLayout::createPartitions( Device* dev,
luksPassphrase,
KPM_PARTITION_FLAG( None ) );
}
// For zfs, we need to make the passphrase available to later modules
if ( correctFS( entry.partFileSystem ) == FileSystem::Zfs )
{
Calamares::GlobalStorage* storage = Calamares::JobQueue::instance()->globalStorage();
QList< QVariant > zfsInfoList;
QVariantMap zfsInfo;
// Save the information subsequent modules will need
zfsInfo[ "encrypted" ] = !luksPassphrase.isEmpty();
zfsInfo[ "passphrase" ] = luksPassphrase;
zfsInfo[ "mountpoint" ] = entry.partMountPoint;
// Add it to the list and insert it into global storage
zfsInfoList.append( zfsInfo );
storage->insert( "zfsInfo", zfsInfoList );
}
PartitionInfo::setFormat( part, true );
PartitionInfo::setMountPoint( part, entry.partMountPoint );
if ( !entry.partLabel.isEmpty() )

View file

@ -23,6 +23,7 @@
#include "GlobalStorage.h"
#include "JobQueue.h"
#include "Settings.h"
#include "partition/FileSystem.h"
#include "partition/PartitionQuery.h"
#include "utils/Logger.h"
@ -104,7 +105,9 @@ CreatePartitionDialog::CreatePartitionDialog( Device* device,
QStringList fsNames;
for ( auto fs : FileSystemFactory::map() )
{
if ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended )
// We need to ensure zfs is added to the list if the zfs module is enabled
if ( ( fs->type() == FileSystem::Type::Zfs && Calamares::Settings::instance()->isModuleEnabled( "zfs" ) )
|| ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended ) )
{
fsNames << userVisibleFS( fs ); // This is put into the combobox
if ( fs->type() == defaultFSType )
@ -240,7 +243,8 @@ CreatePartitionDialog::getNewlyCreatedPartition()
// does so, to set up the partition for create-and-then-set-flags.
Partition* partition = nullptr;
QString luksPassphrase = m_ui->encryptWidget->passphrase();
if ( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty() )
if ( m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty()
&& fsType != FileSystem::Zfs )
{
partition = KPMHelpers::createNewEncryptedPartition(
m_parent, *m_device, m_role, fsType, fsLabel, first, last, luksPassphrase, PartitionTable::Flags() );
@ -251,6 +255,31 @@ CreatePartitionDialog::getNewlyCreatedPartition()
m_parent, *m_device, m_role, fsType, fsLabel, first, last, PartitionTable::Flags() );
}
// For zfs, we let the zfs module handle the encryption but we need to make the passphrase available to later modules
if ( fsType == FileSystem::Zfs )
{
Calamares::GlobalStorage* storage = Calamares::JobQueue::instance()->globalStorage();
QList< QVariant > zfsInfoList;
QVariantMap zfsInfo;
// If this is not the first zfs partition, get the existing list first
if ( storage->contains( "zfsInfo" ) )
{
zfsInfoList = storage->value( "zfsInfo" ).toList();
storage->remove( "zfsInfo" );
}
// Save the information subsequent modules will need
zfsInfo[ "encrypted" ]
= m_ui->encryptWidget->state() == EncryptWidget::Encryption::Confirmed && !luksPassphrase.isEmpty();
zfsInfo[ "passphrase" ] = luksPassphrase;
zfsInfo[ "mountpoint" ] = selectedMountPoint( m_ui->mountPointComboBox );
// Add it to the list and insert it into global storage
zfsInfoList.append( zfsInfo );
storage->insert( "zfsInfo", zfsInfoList );
}
if ( m_device->type() == Device::Type::LVM_Device )
{
partition->setPartitionPath( m_device->deviceNode() + QStringLiteral( "/" )

View file

@ -25,6 +25,7 @@
#include "GlobalStorage.h"
#include "JobQueue.h"
#include "Settings.h"
#include "partition/FileSystem.h"
#include "utils/Logger.h"
@ -89,7 +90,9 @@ EditExistingPartitionDialog::EditExistingPartitionDialog( Device* device,
QStringList fsNames;
for ( auto fs : FileSystemFactory::map() )
{
if ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended )
// We need to ensure zfs is added to the list if the zfs module is enabled
if ( ( fs->type() == FileSystem::Type::Zfs && Calamares::Settings::instance()->isModuleEnabled( "zfs" ) )
|| ( fs->supportCreate() != FileSystem::cmdSupportNone && fs->type() != FileSystem::Extended ) )
{
fsNames << userVisibleFS( fs ); // For the combo box
}
@ -117,6 +120,12 @@ EditExistingPartitionDialog::EditExistingPartitionDialog( Device* device,
m_ui->fileSystemLabel->setEnabled( m_ui->formatRadioButton->isChecked() );
m_ui->fileSystemComboBox->setEnabled( m_ui->formatRadioButton->isChecked() );
// Force a format if the existing device is a zfs device since reusing a zpool isn't currently supported
m_ui->formatRadioButton->setChecked( m_partition->fileSystem().type() == FileSystem::Type::Zfs );
m_ui->formatRadioButton->setEnabled( !( m_partition->fileSystem().type() == FileSystem::Type::Zfs ) );
m_ui->keepRadioButton->setChecked( !( m_partition->fileSystem().type() == FileSystem::Type::Zfs ) );
m_ui->keepRadioButton->setEnabled( !( m_partition->fileSystem().type() == FileSystem::Type::Zfs ) );
setFlagList( *( m_ui->m_listFlags ), m_partition->availableFlags(), PartitionInfo::flags( m_partition ) );
}

View file

@ -11,8 +11,10 @@
#include "CreatePartitionJob.h"
#include "core/PartitionInfo.h"
#include "partition/FileSystem.h"
#include "partition/PartitionQuery.h"
#include "utils/CalamaresUtilsSystem.h"
#include "utils/Logger.h"
#include "utils/Units.h"
@ -24,9 +26,80 @@
#include <kpmcore/ops/newoperation.h>
#include <kpmcore/util/report.h>
#include <qcoreapplication.h>
#include <qregularexpression.h>
using CalamaresUtils::Partition::untranslatedFS;
using CalamaresUtils::Partition::userVisibleFS;
/** @brief Create a partition for zfs with sfdisk
*
* Uses sfdisk to create @p partition. This should only be used in cases
* where using kpmcore to create the partition would not be appropriate
*
*/
static Calamares::JobResult
createZfs( Partition* partition, Device* device )
{
auto r = CalamaresUtils::System::instance()->runCommand(
{ "sh",
"-c",
"echo start=" + QString::number( partition->firstSector() ) + " size="
+ QString::number( partition->length() ) + " | sfdisk --append --force " + partition->devicePath() },
std::chrono::seconds( 5 ) );
if ( r.getExitCode() != 0 )
{
return Calamares::JobResult::error(
QCoreApplication::translate( CreatePartitionJob::staticMetaObject.className(),
"Failed to create partition" ),
QCoreApplication::translate( CreatePartitionJob::staticMetaObject.className(),
"Failed to create zfs partition with output: "
+ r.getOutput().toLocal8Bit() ) );
}
// Now we need to do some things that would normally be done by kpmcore
// First we get the device node from the output and set it as the partition path
QRegularExpression re( QStringLiteral( "Created a new partition (\\d+)" ) );
QRegularExpressionMatch rem = re.match( r.getOutput() );
QString deviceNode;
if ( rem.hasMatch() )
{
if ( partition->devicePath().back().isDigit() )
{
deviceNode = partition->devicePath() + QLatin1Char( 'p' ) + rem.captured( 1 );
}
else
{
deviceNode = partition->devicePath() + rem.captured( 1 );
}
}
partition->setPartitionPath( deviceNode );
partition->setState( Partition::State::None );
// If it is a gpt device, set the partition UUID
if ( device->partitionTable()->type() == PartitionTable::gpt && partition->uuid().isEmpty() )
{
r = CalamaresUtils::System::instance()->runCommand(
{ "sfdisk", "--list", "--output", "Device,UUID", partition->devicePath() }, std::chrono::seconds( 5 ) );
if ( r.getExitCode() == 0 )
{
QRegularExpression re( deviceNode + QStringLiteral( " +(.+)" ) );
QRegularExpressionMatch rem = re.match( r.getOutput() );
if ( rem.hasMatch() )
{
partition->setUUID( rem.captured( 1 ) );
}
}
}
return Calamares::JobResult::ok();
}
CreatePartitionJob::CreatePartitionJob( Device* device, Partition* partition )
: PartitionJob( partition )
, m_device( device )
@ -194,6 +267,13 @@ CreatePartitionJob::prettyStatusMessage() const
Calamares::JobResult
CreatePartitionJob::exec()
{
// kpmcore doesn't currently handle this case properly so for now, we manually create the partition
// The zfs module can later deal with creating a zpool in the partition
if ( m_partition->fileSystem().type() == FileSystem::Type::Zfs )
{
return createZfs( m_partition, m_device );
}
Report report( nullptr );
NewOperation op( *m_device, m_partition );
op.setStatus( Operation::StatusRunning );

View file

@ -49,6 +49,27 @@ def list_mounts(root_mount_point):
return lst
def export_zpools(root_mount_point):
""" Exports the zpools if defined in global storage
:param root_mount_point: The absolute path to the root of the install
:return:
"""
try:
zfs_pool_list = libcalamares.globalstorage.value("zfsPoolInfo")
zfs_pool_list.sort(reverse=True, key=lambda x: x["poolName"])
if zfs_pool_list:
for zfs_pool in zfs_pool_list:
try:
libcalamares.utils.host_env_process_output(['zpool', 'export', zfs_pool["poolName"]])
except subprocess.CalledProcessError:
libcalamares.utils.warning("Failed to export zpool")
except Exception as e:
# If this fails it shouldn't cause the installation to fail
libcalamares.utils.warning("Received exception while exporting zpools: " + format(e))
pass
def run():
""" Unmounts given mountpoints in decreasing order.
@ -94,6 +115,8 @@ def run():
# in the exception object.
subprocess.check_output(["umount", "-lv", mount_point], stderr=subprocess.STDOUT)
export_zpools(root_mount_point)
os.rmdir(root_mount_point)
return None

View file

@ -0,0 +1,13 @@
# === This file is part of Calamares - <https://calamares.io> ===
#
# SPDX-FileCopyrightText: 2020 Adriaan de Groot <groot@kde.org>
# SPDX-License-Identifier: BSD-2-Clause
#
calamares_add_plugin( zfs
TYPE job
EXPORT_MACRO PLUGINDLLEXPORT_PRO
SOURCES
ZfsJob.cpp
SHARED_LIB
)

14
src/modules/zfs/README.md Normal file
View file

@ -0,0 +1,14 @@
## zfs Module Notes
There are a few considerations to be aware of when enabling the zfs module:
* You must provide zfs kernel modules or kernel support on the ISO for the zfs module to function
* Support for zfs in the partition module is conditional on the zfs module being enabled
* If you use grub with zfs, you must have `ZPOOL_VDEV_NAME_PATH=1` in your environment when running grub-install or grub-mkconfig.
* Calamares will ensure this happens during the bootloader module.
* It will also add it to `/etc/environment` so it will be available in the installation
* If you have any scripts or other processes that trigger grub-mkconfig during the install process, be sure to add `ZPOOL_VDEV_NAME_PATH=1` to their environment as well (see the sketch after this list)
* In most cases, you will need to enable services for zfs support appropriate to your distro. For example, when testing on Arch the following services were enabled:
* zfs.target
* zfs-import-cache
* zfs-mount
* zfs-import.target
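For a hook written in Python, a minimal sketch of doing that (the plain `grub-mkconfig`
invocation and the config path here are illustrative assumptions, not something Calamares
provides) could look like:

```python
# Hedged sketch: run an extra grub-mkconfig with ZPOOL_VDEV_NAME_PATH=1 in its
# environment, mirroring what the bootloader module does for zfs installs.
import os
import subprocess

env = dict(os.environ, ZPOOL_VDEV_NAME_PATH="1")
subprocess.check_call(["grub-mkconfig", "-o", "/boot/grub/grub.cfg"], env=env)
```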

365
src/modules/zfs/ZfsJob.cpp Normal file
View file

@ -0,0 +1,365 @@
/* === This file is part of Calamares - <https://calamares.io> ===
*
* SPDX-FileCopyrightText: 2021 Evan James <dalto@fastmail.com>
* SPDX-License-Identifier: GPL-3.0-or-later
*
* Calamares is Free Software: see the License-Identifier above.
*
*/
#include "ZfsJob.h"
#include "utils/CalamaresUtilsSystem.h"
#include "utils/Logger.h"
#include "utils/Variant.h"
#include "GlobalStorage.h"
#include "JobQueue.h"
#include "Settings.h"
#include <QProcess>
#include <unistd.h>
/** @brief Returns the alphanumeric portion of a string
*
* @p input is the input string
*
*/
static QString
alphaNumeric( QString input )
{
return input.remove( QRegExp( "[^a-zA-Z\\d\\s]" ) );
}
/** @brief Returns the best available device for zpool creation
*
* zfs partitions generally don't have a UUID until the zpool is created; the device path
* is usually formed from either the id or the partuuid. The id isn't stored by kpmcore,
* so this function checks for a partuuid and, if present, forms a device path from it.
* As a fallback, it uses the device name, i.e. /dev/sdaX.
*
* The function returns a fully qualified path to the device or an empty string if no device
* is found
*
* @p pMap is the partition map from global storage
*
*/
static QString
findBestZfsDevice( QVariantMap pMap )
{
// Find the best device identifier; if one isn't available, return an empty string
if ( pMap[ "partuuid" ].toString() != "" )
{
return "/dev/disk/by-partuuid/" + pMap[ "partuuid" ].toString().toLower();
}
else if ( pMap[ "device" ].toString() != "" )
{
return pMap[ "device" ].toString().toLower();
}
else
{
return QString();
}
}
/** @brief Converts the value in a QVariant to a string which is a valid option for canmount
*
* Storing "on" and "off" in QVariant results in a conversion to boolean. This function takes
* the QVariant in @p canMount and converts it to a QString holding "on", "off" or the string
* value in the QVariant.
*
*/
static QString
convertCanMount( QVariant canMount )
{
if ( canMount == true )
{
return "on";
}
else if ( canMount == false )
{
return "off";
}
else
{
return canMount.toString();
}
}
ZfsJob::ZfsJob( QObject* parent )
: Calamares::CppJob( parent )
{
}
ZfsJob::~ZfsJob() {}
QString
ZfsJob::prettyName() const
{
return tr( "Create ZFS pools and datasets" );
}
void
ZfsJob::collectMountpoints( const QVariantList& partitions )
{
m_mountpoints.clear();
for ( const QVariant& partition : partitions )
{
if ( partition.canConvert( QVariant::Map ) )
{
QString mountpoint = partition.toMap().value( "mountPoint" ).toString();
if ( !mountpoint.isEmpty() )
{
m_mountpoints.append( mountpoint );
}
}
}
}
bool
ZfsJob::isMountpointOverlapping( const QString& targetMountpoint ) const
{
for ( const QString& mountpoint : m_mountpoints )
{
if ( mountpoint != '/' && targetMountpoint.startsWith( mountpoint ) )
{
return true;
}
}
return false;
}
ZfsResult
ZfsJob::createZpool( QString deviceName, QString poolName, QString poolOptions, bool encrypt, QString passphrase ) const
{
// zfs doesn't wait for the devices so pause for 2 seconds to ensure we give time for the device files to be created
sleep( 2 );
QStringList command;
if ( encrypt )
{
command = QStringList() << "zpool"
<< "create" << poolOptions.split( ' ' ) << "-O"
<< "encryption=aes-256-gcm"
<< "-O"
<< "keyformat=passphrase" << poolName << deviceName;
}
else
{
command = QStringList() << "zpool"
<< "create" << poolOptions.split( ' ' ) << poolName << deviceName;
}
auto r = CalamaresUtils::System::instance()->runCommand(
CalamaresUtils::System::RunLocation::RunInHost, command, QString(), passphrase, std::chrono::seconds( 10 ) );
if ( r.getExitCode() != 0 )
{
cWarning() << "Failed to run zpool create. The output was: " + r.getOutput();
return { false, tr( "Failed to create zpool on " ) + deviceName };
}
return { true, QString() };
}
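// Illustrative only: with the sample zfs.conf poolOptions and an encrypted pool, the
// command assembled above is roughly
//   zpool create -f -o ashift=12 -O mountpoint=none -O acltype=posixacl -O relatime=on
//       -O encryption=aes-256-gcm -O keyformat=passphrase zpcala /dev/disk/by-partuuid/<partuuid>
// with the passphrase supplied on stdin by runCommand().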
Calamares::JobResult
ZfsJob::exec()
{
QVariantList partitions;
Calamares::GlobalStorage* gs = Calamares::JobQueue::instance()->globalStorage();
if ( gs && gs->contains( "partitions" ) && gs->value( "partitions" ).canConvert( QVariant::List ) )
{
partitions = gs->value( "partitions" ).toList();
}
else
{
cWarning() << "No *partitions* defined.";
return Calamares::JobResult::internalError( tr( "Configuration Error" ),
tr( "No partitions are available for Zfs." ),
Calamares::JobResult::InvalidConfiguration );
}
const CalamaresUtils::System* system = CalamaresUtils::System::instance();
QVariantList poolNames;
// Check to ensure the list of zfs info from the partition module is available and convert it to a list
if ( !gs->contains( "zfsInfo" ) || !gs->value( "zfsInfo" ).canConvert( QVariant::List ) )
{
return Calamares::JobResult::error( tr( "Internal data missing" ), tr( "Failed to create zpool" ) );
}
QVariantList zfsInfoList = gs->value( "zfsInfo" ).toList();
for ( auto& partition : qAsConst( partitions ) )
{
QVariantMap pMap;
if ( partition.canConvert( QVariant::Map ) )
{
pMap = partition.toMap();
}
// If it isn't a zfs partition, ignore it
if ( pMap[ "fsName" ] != "zfs" )
{
continue;
}
// Find the best device identifier, if one isn't available, skip this partition
QString deviceName = findBestZfsDevice( pMap );
if ( deviceName.isEmpty() )
{
continue;
}
// If the partition doesn't have a mountpoint, skip it
QString mountpoint = pMap[ "mountPoint" ].toString();
if ( mountpoint.isEmpty() )
{
continue;
}
// Build a pool name from the configured pool name and the mountpoint; this is not ideal but should work until there is a UI for zfs
QString poolName = m_poolName;
if ( mountpoint != '/' )
{
poolName += alphaNumeric( mountpoint );
}
// Look in the zfs info list to see if this partition should be encrypted
bool encrypt = false;
QString passphrase;
for ( const QVariant& zfsInfo : qAsConst( zfsInfoList ) )
{
if ( zfsInfo.canConvert( QVariant::Map ) && zfsInfo.toMap().value( "encrypted" ).toBool()
&& mountpoint == zfsInfo.toMap().value( "mountpoint" ) )
{
encrypt = true;
passphrase = zfsInfo.toMap().value( "passphrase" ).toString();
}
}
// Create the zpool
ZfsResult zfsResult;
if ( encrypt )
{
zfsResult = createZpool( deviceName, poolName, m_poolOptions, true, passphrase );
}
else
{
zfsResult = createZpool( deviceName, poolName, m_poolOptions, false );
}
if ( !zfsResult.success )
{
return Calamares::JobResult::error( tr( "Failed to create zpool" ), zfsResult.failureMessage );
}
// Save the poolname, dataset name and mountpoint. It will later be added to a list and placed in global storage.
// This will be used by later modules including mount and umount
QVariantMap poolNameEntry;
poolNameEntry[ "poolName" ] = poolName;
poolNameEntry[ "mountpoint" ] = mountpoint;
poolNameEntry[ "dsName" ] = "none";
// If the mountpoint is /, create datasets per the config file. If not, create a single dataset mounted at the partition's mountpoint
if ( mountpoint == '/' )
{
collectMountpoints( partitions );
QVariantList datasetList;
for ( const auto& dataset : qAsConst( m_datasets ) )
{
QVariantMap datasetMap = dataset.toMap();
// Make sure all values are valid
if ( datasetMap[ "dsName" ].toString().isEmpty() || datasetMap[ "mountpoint" ].toString().isEmpty()
|| datasetMap[ "canMount" ].toString().isEmpty() )
{
cWarning() << "Bad dataset entry";
continue;
}
// We should skip this dataset if it conflicts with a permanent mountpoint
if ( isMountpointOverlapping( datasetMap[ "mountpoint" ].toString() ) )
{
continue;
}
QString canMount = convertCanMount( datasetMap[ "canMount" ].toString() );
// Create the dataset
auto r = system->runCommand( { QStringList() << "zfs"
<< "create" << m_datasetOptions.split( ' ' ) << "-o"
<< "canmount=" + canMount << "-o"
<< "mountpoint=" + datasetMap[ "mountpoint" ].toString()
<< poolName + "/" + datasetMap[ "dsName" ].toString() },
std::chrono::seconds( 10 ) );
if ( r.getExitCode() != 0 )
{
cWarning() << "Failed to create dataset" << datasetMap[ "dsName" ].toString();
continue;
}
// Add the dataset to the list for global storage; this information is used later to properly set
// the mount options on each dataset
datasetMap[ "zpool" ] = m_poolName;
datasetList.append( datasetMap );
}
// If the list isn't empty, add it to global storage
if ( !datasetList.isEmpty() )
{
gs->insert( "zfsDatasets", datasetList );
}
}
else
{
QString dsName = alphaNumeric( mountpoint );
auto r = system->runCommand( { QStringList() << "zfs"
<< "create" << m_datasetOptions.split( ' ' ) << "-o"
<< "canmount=on"
<< "-o"
<< "mountpoint=" + mountpoint << poolName + "/" + dsName },
std::chrono::seconds( 10 ) );
if ( r.getExitCode() != 0 )
{
return Calamares::JobResult::error( tr( "Failed to create dataset" ),
tr( "The output was: " ) + r.getOutput() );
}
poolNameEntry[ "dsName" ] = dsName;
}
poolNames.append( poolNameEntry );
// Export the zpool so it can be reimported at the correct location later
auto r = system->runCommand( { "zpool", "export", poolName }, std::chrono::seconds( 10 ) );
if ( r.getExitCode() != 0 )
{
cWarning() << "Failed to export pool" << m_poolName;
}
}
// Put the list of zpools into global storage
if ( !poolNames.isEmpty() )
{
gs->insert( "zfsPoolInfo", poolNames );
}
return Calamares::JobResult::ok();
}
void
ZfsJob::setConfigurationMap( const QVariantMap& map )
{
m_poolName = CalamaresUtils::getString( map, "poolName" );
m_poolOptions = CalamaresUtils::getString( map, "poolOptions" );
m_datasetOptions = CalamaresUtils::getString( map, "datasetOptions" );
m_datasets = CalamaresUtils::getList( map, "datasets" );
}
CALAMARES_PLUGIN_FACTORY_DEFINITION( ZfsJobFactory, registerPlugin< ZfsJob >(); )

89
src/modules/zfs/ZfsJob.h Normal file
View file

@ -0,0 +1,89 @@
/* === This file is part of Calamares - <https://calamares.io> ===
*
* SPDX-FileCopyrightText: 2021 Evan James <dalto@fastmail.com>
* SPDX-License-Identifier: GPL-3.0-or-later
*
* Calamares is Free Software: see the License-Identifier above.
*
*/
#ifndef ZFSJOB_H
#define ZFSJOB_H
#include <QObject>
#include <QStringList>
#include <QVariantMap>
#include "CppJob.h"
#include "utils/PluginFactory.h"
#include "DllMacro.h"
struct ZfsResult
{
bool success;
QString failureMessage; // This message is displayed to the user and should be translated at the time of population
};
/** @brief Create zpools and zfs datasets
*
*/
class PLUGINDLLEXPORT ZfsJob : public Calamares::CppJob
{
Q_OBJECT
public:
explicit ZfsJob( QObject* parent = nullptr );
~ZfsJob() override;
QString prettyName() const override;
Calamares::JobResult exec() override;
void setConfigurationMap( const QVariantMap& configurationMap ) override;
private:
QString m_poolName;
QString m_poolOptions;
QString m_datasetOptions;
QStringList m_mountpoints;
QList< QVariant > m_datasets;
/** @brief Creates a zpool based on the provided arguments
*
* @p deviceName is a full path to the device the zpool should be created on
* @p poolName is a string containing the name of the pool to create
* @p poolOptions are the options to pass to zpool create
* @p encrypt is a boolean which determines if the pool should be encrypted
* @p passphrase is a string containing the passphrase
*
*/
ZfsResult createZpool( QString deviceName,
QString poolName,
QString poolOptions,
bool encrypt,
QString passphrase = QString() ) const;
/** @brief Collects all the mountpoints from the partitions
*
* Iterates over @p partitions to gather each mountpoint present
* in the list of maps and populates m_mountpoints
*
*/
void collectMountpoints( const QVariantList& partitions );
/** @brief Check to see if a given mountpoint overlaps with one of the defined mountpoints
*
* Iterates over m_mountpoints and checks if @p targetMountpoint overlaps with them by comparing
* the beginning of targetMountpoint with all the values in m_mountpoints. Of course, / is excluded
* since all the mountpoints would begin with /
*
*/
bool isMountpointOverlapping( const QString& targetMountpoint ) const;
};
CALAMARES_PLUGIN_FACTORY_DECLARATION( ZfsJobFactory )
#endif // ZFSJOB_H

38
src/modules/zfs/zfs.conf Normal file
View file

@ -0,0 +1,38 @@
# SPDX-FileCopyrightText: no
# SPDX-License-Identifier: CC0-1.0
#
# The zfs module creates the zfs pools and datasets
#
#
#
---
# The name to be used for the zpool
poolName: zpcala
# A list of options that will be passed to zpool create
poolOptions: "-f -o ashift=12 -O mountpoint=none -O acltype=posixacl -O relatime=on"
# A list of options that will be passed to zfs create when creating each dataset
# Do not include "canmount" or "mountpoint" as those are set below in the datasets array
datasetOptions: "-o compression=lz4 -o atime=off -o xattr=sa"
# An array of datasets that will be created on the zpool mounted at /
datasets:
- dsName: ROOT
mountpoint: none
canMount: off
- dsName: ROOT/distro
mountpoint: none
canMount: off
- dsName: ROOT/distro/root
mountpoint: /
canMount: noauto
- dsName: ROOT/distro/home
mountpoint: /home
canMount: on
- dsName: ROOT/distro/varcache
mountpoint: /var/cache
canMount: on
- dsName: ROOT/distro/varlog
mountpoint: /var/log
canMount: on
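# With the defaults above (poolName "zpcala"), an install with a single zfs root partition
# ends up with datasets such as zpcala/ROOT/distro/root mounted at / and
# zpcala/ROOT/distro/home mounted at /home.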

View file

@ -0,0 +1,22 @@
# SPDX-FileCopyrightText: 2020 Adriaan de Groot <groot@kde.org>
# SPDX-License-Identifier: GPL-3.0-or-later
---
$schema: https://json-schema.org/schema#
$id: https://calamares.io/schemas/zfs
additionalProperties: false
type: object
properties:
poolName: { type: string }
poolOptions: { type: string }
datasetOptions: { type: string }
datasets:
type: array
items:
type: object
additionalProperties: false
properties:
dsName: { type: string }
mountpoint: { type: string }
canMount: { type: string }
required: [ dsName, mountpoint, canMount ]
required: [ poolName, datasets ]