From 4f4d8cb479fe84ff5c14c5f1aadf0dd0e4856c69 Mon Sep 17 00:00:00 2001 From: InsanePrawn Date: Sat, 29 Mar 2025 19:54:42 +0100 Subject: [PATCH] reformat python files with ruff --- docs/source/conf.py | 34 +- src/kupferbootstrap/binfmt/binfmt.py | 78 ++- src/kupferbootstrap/binfmt/cli.py | 32 +- src/kupferbootstrap/cache/cli.py | 48 +- src/kupferbootstrap/chroot/abstract.py | 344 +++++++--- src/kupferbootstrap/chroot/base.py | 27 +- src/kupferbootstrap/chroot/build.py | 189 ++++-- src/kupferbootstrap/chroot/cli.py | 40 +- src/kupferbootstrap/chroot/device.py | 68 +- src/kupferbootstrap/chroot/helpers.py | 74 ++- src/kupferbootstrap/config/cli.py | 269 +++++--- src/kupferbootstrap/config/profile.py | 93 +-- src/kupferbootstrap/config/scheme.py | 48 +- src/kupferbootstrap/config/state.py | 279 +++++--- src/kupferbootstrap/constants.py | 198 +++--- src/kupferbootstrap/devices/cli.py | 46 +- src/kupferbootstrap/devices/device.py | 171 +++-- src/kupferbootstrap/devices/deviceinfo.py | 145 ++-- src/kupferbootstrap/dictscheme.py | 144 +++- src/kupferbootstrap/distro/distro.py | 114 +++- src/kupferbootstrap/distro/package.py | 40 +- src/kupferbootstrap/distro/repo.py | 67 +- src/kupferbootstrap/distro/repo_config.py | 133 ++-- src/kupferbootstrap/exec/cmd.py | 75 ++- src/kupferbootstrap/exec/file.py | 100 ++- src/kupferbootstrap/flavours/cli.py | 56 +- src/kupferbootstrap/flavours/flavour.py | 73 +- src/kupferbootstrap/generator.py | 42 +- src/kupferbootstrap/image/boot.py | 66 +- src/kupferbootstrap/image/fastboot.py | 40 +- src/kupferbootstrap/image/flash.py | 157 +++-- src/kupferbootstrap/image/image.py | 476 ++++++++----- src/kupferbootstrap/logger.py | 48 +- src/kupferbootstrap/main.py | 47 +- src/kupferbootstrap/net/cli.py | 2 +- src/kupferbootstrap/net/forwarding.py | 64 +- src/kupferbootstrap/net/ssh.py | 169 +++-- src/kupferbootstrap/net/telnet.py | 16 +- src/kupferbootstrap/packages/build.py | 627 ++++++++++++------ src/kupferbootstrap/packages/cli.py | 466 +++++++++---- src/kupferbootstrap/packages/pkgbuild.py | 315 ++++++--- src/kupferbootstrap/packages/srcinfo_cache.py | 178 +++-- src/kupferbootstrap/progressbar.py | 26 +- src/kupferbootstrap/utils.py | 143 ++-- src/kupferbootstrap/version/cli.py | 28 +- src/kupferbootstrap/version/kbs.py | 55 +- src/kupferbootstrap/wrapper/__init__.py | 36 +- src/kupferbootstrap/wrapper/docker.py | 163 +++-- src/kupferbootstrap/wrapper/su_helper.py | 45 +- src/kupferbootstrap/wrapper/wrapper.py | 53 +- tests/config/test_config.py | 100 ++- tests/devices/test_device.py | 46 +- tests/devices/test_deviceinfo.py | 28 +- tests/exec/test_cmd.py | 41 +- tests/exec/test_file.py | 90 +-- tests/flavours/test_flavour.py | 2 +- tests/test_integration.py | 87 ++- tests/version/test_kbs_version.py | 16 +- 58 files changed, 4460 insertions(+), 2197 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 6ab8a57..aaac37a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -3,22 +3,22 @@ import os from sphinx.config import getenv from kupferbootstrap.utils import git -#sys.path.insert(0, os.path.abspath('../..')) +# sys.path.insert(0, os.path.abspath('../..')) extensions = [ - 'sphinx_click', + "sphinx_click", "sphinx.ext.autodoc", - 'sphinx.ext.autosummary', + "sphinx.ext.autosummary", "sphinx.ext.linkcode", - 'myst_parser' + "myst_parser", ] myst_all_links_external = True -templates_path = ['templates'] -project = 'Kupfer👢strap' -html_title = 'Kupferbootstrap' -html_theme = 'furo' -html_static_path = ['static'] -html_css_files = 
['kupfer_docs.css'] -html_favicon = 'static/kupfer-white-filled.svg' +templates_path = ["templates"] +project = "Kupfer👢strap" +html_title = "Kupferbootstrap" +html_theme = "furo" +html_static_path = ["static"] +html_css_files = ["kupfer_docs.css"] +html_favicon = "static/kupfer-white-filled.svg" html_theme_options = { "globaltoc_maxdepth": 5, "globaltoc_collapse": True, @@ -69,9 +69,13 @@ version = getenv("version") or get_version() def linkcode_resolve(domain, info): - if domain != 'py': + if domain != "py": return None - if not info['module']: + if not info["module"]: return None - filename = info['module'].replace('.', '/') - return "%s/-/blob/%s/src/%s.py" % (html_theme_options["source_repository"], version, filename) + filename = info["module"].replace(".", "/") + return "%s/-/blob/%s/src/%s.py" % ( + html_theme_options["source_repository"], + version, + filename, + ) diff --git a/src/kupferbootstrap/binfmt/binfmt.py b/src/kupferbootstrap/binfmt/binfmt.py index d5d263d..250c15a 100644 --- a/src/kupferbootstrap/binfmt/binfmt.py +++ b/src/kupferbootstrap/binfmt/binfmt.py @@ -20,40 +20,44 @@ def binfmt_info(chroot: Optional[Chroot] = None): logging.debug("parsing: " + info) with open(info, "r") as handle: for line in handle: - if line.startswith('#') or ":" not in line: + if line.startswith("#") or ":" not in line: continue splitted = line.split(":") result = { # _ = splitted[0] # empty - 'name': splitted[1], - 'type': splitted[2], - 'offset': splitted[3], - 'magic': splitted[4], - 'mask': splitted[5], - 'interpreter': splitted[6], - 'flags': splitted[7], - 'line': line, + "name": splitted[1], + "type": splitted[2], + "offset": splitted[3], + "magic": splitted[4], + "mask": splitted[5], + "interpreter": splitted[6], + "flags": splitted[7], + "line": line, } - if not result['name'].startswith('qemu-'): + if not result["name"].startswith("qemu-"): logging.fatal(f'Unknown binfmt handler "{result["name"]}"') - logging.debug(f'binfmt line: {line}') + logging.debug(f"binfmt line: {line}") continue - arch = ''.join(result['name'].split('-')[1:]) + arch = "".join(result["name"].split("-")[1:]) full[arch] = result return full -def is_arch_known(arch: Arch, raise_exception: bool = False, action: Optional[str] = None) -> bool: +def is_arch_known( + arch: Arch, raise_exception: bool = False, action: Optional[str] = None +) -> bool: if arch not in QEMU_ARCHES: if raise_exception: - raise Exception(f'binfmt{f".{action}()" if action else ""}: unknown arch {arch} (not in QEMU_ARCHES)') + raise Exception( + f"binfmt{f'.{action}()' if action else ''}: unknown arch {arch} (not in QEMU_ARCHES)" + ) return False return True def binfmt_is_registered(arch: Arch, chroot: Optional[Chroot] = None) -> bool: - is_arch_known(arch, True, 'is_registered') + is_arch_known(arch, True, "is_registered") qemu_arch = QEMU_ARCHES[arch] path = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch binfmt_ensure_mounted(chroot) @@ -63,21 +67,25 @@ def binfmt_is_registered(arch: Arch, chroot: Optional[Chroot] = None) -> bool: def binfmt_ensure_mounted(chroot: Optional[Chroot] = None): - binfmt_path = '/proc/sys/fs/binfmt_misc' - register_path = binfmt_path + '/register' + binfmt_path = "/proc/sys/fs/binfmt_misc" + register_path = binfmt_path + "/register" if chroot: register_path = chroot.get_path(register_path) if not os.path.exists(register_path): - logging.info('mounting binfmt_misc') - result = (chroot.mount if chroot else mount)('binfmt_misc', binfmt_path, options=[], fs_type='binfmt_misc') # type: ignore[operator] - if 
(isinstance(result, CompletedProcess) and result.returncode != 0) or not result: - raise Exception(f'Failed mounting binfmt_misc to {binfmt_path}') + logging.info("mounting binfmt_misc") + result = (chroot.mount if chroot else mount)( + "binfmt_misc", binfmt_path, options=[], fs_type="binfmt_misc" + ) # type: ignore[operator] + if ( + isinstance(result, CompletedProcess) and result.returncode != 0 + ) or not result: + raise Exception(f"Failed mounting binfmt_misc to {binfmt_path}") def binfmt_register(arch: Arch, chroot: Optional[Chroot] = None): - binfmt_path = '/proc/sys/fs/binfmt_misc' - register_path = binfmt_path + '/register' - is_arch_known(arch, True, 'register') + binfmt_path = "/proc/sys/fs/binfmt_misc" + register_path = binfmt_path + "/register" + is_arch_known(arch, True, "register") qemu_arch = QEMU_ARCHES[arch] if binfmt_is_registered(arch, chroot=chroot): return @@ -95,22 +103,28 @@ def binfmt_register(arch: Arch, chroot: Optional[Chroot] = None): # https://en.wikipedia.org/wiki/Binfmt_misc # :name:type:offset:magic:mask:interpreter:flags info = lines[qemu_arch] - code = info['line'] + code = info["line"] if arch == os.uname().machine: - logging.fatal("Attempted to register qemu binfmt for host architecture, skipping!") + logging.fatal( + "Attempted to register qemu binfmt for host architecture, skipping!" + ) return # Register in binfmt_misc logging.info(f"Registering qemu binfmt ({arch})") - _runcmd(f'echo "{code}" > "{register_path}" 2>/dev/null') # use path without chroot path prefix + _runcmd( + f'echo "{code}" > "{register_path}" 2>/dev/null' + ) # use path without chroot path prefix if not binfmt_is_registered(arch, chroot=chroot): - logging.debug(f'binfmt line: {code}') - raise Exception(f'Failed to register qemu-user for {arch} with binfmt_misc, {binfmt_path}/{info["name"]} not found') + logging.debug(f"binfmt line: {code}") + raise Exception( + f"Failed to register qemu-user for {arch} with binfmt_misc, {binfmt_path}/{info['name']} not found" + ) def binfmt_unregister(arch, chroot: Optional[Chroot] = None): - is_arch_known(arch, True, 'unregister') + is_arch_known(arch, True, "unregister") qemu_arch = QEMU_ARCHES[arch] binfmt_ensure_mounted(chroot) binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch @@ -122,4 +136,6 @@ def binfmt_unregister(arch, chroot: Optional[Chroot] = None): logging.info(f"Unregistering qemu binfmt ({arch})") run_root_cmd(f"echo -1 > {binfmt_file}") if binfmt_is_registered(arch, chroot=chroot): - raise Exception(f'Failed to UNregister qemu-user for {arch} with binfmt_misc, {chroot=}') + raise Exception( + f"Failed to UNregister qemu-user for {arch} with binfmt_misc, {chroot=}" + ) diff --git a/src/kupferbootstrap/binfmt/cli.py b/src/kupferbootstrap/binfmt/cli.py index fd33bbe..6a03e83 100644 --- a/src/kupferbootstrap/binfmt/cli.py +++ b/src/kupferbootstrap/binfmt/cli.py @@ -7,27 +7,41 @@ from kupferbootstrap.constants import Arch, ARCHES from .binfmt import binfmt_unregister, binfmt_is_registered -cmd_binfmt = click.Group('binfmt', help='Manage qemu binfmt for executing foreign architecture binaries') -arches_arg = click.argument('arches', type=click.Choice(ARCHES), nargs=-1, required=True) -arches_arg_optional = click.argument('arches', type=click.Choice(ARCHES), nargs=-1, required=False) +cmd_binfmt = click.Group( + "binfmt", + help="Manage qemu binfmt for executing foreign architecture binaries", +) +arches_arg = click.argument( + "arches", type=click.Choice(ARCHES), nargs=-1, required=True +) +arches_arg_optional = click.argument( + 
"arches", type=click.Choice(ARCHES), nargs=-1, required=False +) -@cmd_binfmt.command('register', help='Register a binfmt handler with the kernel') +@cmd_binfmt.command( + "register", help="Register a binfmt handler with the kernel" +) @arches_arg def cmd_register(arches: list[Arch], disable_chroot: bool = False): from ..packages.build import build_enable_qemu_binfmt + for arch in arches: build_enable_qemu_binfmt(arch) -@cmd_binfmt.command('unregister', help='Unregister a binfmt handler from the kernel') +@cmd_binfmt.command( + "unregister", help="Unregister a binfmt handler from the kernel" +) @arches_arg_optional def cmd_unregister(arches: Optional[list[Arch]]): for arch in arches or ARCHES: binfmt_unregister(arch) -@cmd_binfmt.command('status', help='Get the status of a binfmt handler from the kernel') +@cmd_binfmt.command( + "status", help="Get the status of a binfmt handler from the kernel" +) @arches_arg_optional def cmd_status(arches: Optional[list[Arch]]): for arch in arches or ARCHES: @@ -38,7 +52,9 @@ def cmd_status(arches: Optional[list[Arch]]): continue verb = click.style( "is" if active else "is NOT", - fg='green' if (active ^ native) else 'red', + fg="green" if (active ^ native) else "red", bold=True, ) - click.echo(f'Binfmt for {arch} {verb} set up! {"(host architecture!)" if native else ""}') + click.echo( + f"Binfmt for {arch} {verb} set up! {'(host architecture!)' if native else ''}" + ) diff --git a/src/kupferbootstrap/cache/cli.py b/src/kupferbootstrap/cache/cli.py index 1eedc9a..d816658 100644 --- a/src/kupferbootstrap/cache/cli.py +++ b/src/kupferbootstrap/cache/cli.py @@ -11,21 +11,39 @@ from kupferbootstrap.wrapper import enforce_wrap PATHS = list(CHROOT_PATHS.keys()) -@click.group(name='cache') +@click.group(name="cache") def cmd_cache(): """Clean various cache directories""" -@cmd_cache.command(name='clean') -@click.option('--force', is_flag=True, default=False, help="Don't ask for any confirmation") -@click.option('-n', '--noop', is_flag=True, default=False, help="Print what would be removed but dont execute") -@click.argument('paths', nargs=-1, type=click.Choice(['all'] + PATHS), required=False) +@cmd_cache.command(name="clean") +@click.option( + "--force", + is_flag=True, + default=False, + help="Don't ask for any confirmation", +) +@click.option( + "-n", + "--noop", + is_flag=True, + default=False, + help="Print what would be removed but dont execute", +) +@click.argument( + "paths", nargs=-1, type=click.Choice(["all"] + PATHS), required=False +) @click.pass_context -def cmd_clean(ctx: click.Context, paths: list[str], force: bool = False, noop: bool = False): +def cmd_clean( + ctx: click.Context, + paths: list[str], + force: bool = False, + noop: bool = False, +): """Clean various working directories""" - if unknown_paths := (set(paths) - set(PATHS + ['all'])): + if unknown_paths := (set(paths) - set(PATHS + ["all"])): raise Exception(f"Unknown paths: {' ,'.join(unknown_paths)}") - if 'all' in paths or (not paths and force): + if "all" in paths or (not paths and force): paths = PATHS.copy() enforce_wrap() @@ -33,19 +51,23 @@ def cmd_clean(ctx: click.Context, paths: list[str], force: bool = False, noop: b clear = {path: (path in paths) for path in PATHS} query = not paths if not query and not force: - click.confirm(f'Really clear {", ".join(paths)}?', abort=True) + click.confirm(f"Really clear {', '.join(paths)}?", abort=True) for path_name in PATHS: if query and not force: - clear[path_name] = click.confirm(f'{"(Noop) " if noop else ""}Clear {path_name}?') + 
clear[path_name] = click.confirm( + f"{'(Noop) ' if noop else ''}Clear {path_name}?" + ) if clear[path_name]: - logging.info(f'Clearing {path_name}') - if path_name == 'pkgbuilds': + logging.info(f"Clearing {path_name}") + if path_name == "pkgbuilds": ctx.invoke(cmd_clean_pkgbuilds, force=force, noop=noop) continue dir = config.get_path(path_name) for file in os.listdir(dir): path = os.path.join(dir, file) log = logging.info if noop else logging.debug - log(f'{"Would remove" if noop else "Removing"} "{path_name}/{file}"') + log( + f'{"Would remove" if noop else "Removing"} "{path_name}/{file}"' + ) if not noop: remove_file(path, recursive=True) diff --git a/src/kupferbootstrap/chroot/abstract.py b/src/kupferbootstrap/chroot/abstract.py index d01db77..3e5c3a2 100644 --- a/src/kupferbootstrap/chroot/abstract.py +++ b/src/kupferbootstrap/chroot/abstract.py @@ -11,13 +11,39 @@ from uuid import uuid4 from kupferbootstrap.config.state import config from kupferbootstrap.constants import Arch, CHROOT_PATHS, GCC_HOSTSPECS -from kupferbootstrap.distro.distro import get_base_distro, get_kupfer_local, RepoInfo -from kupferbootstrap.exec.cmd import FileDescriptor, run_root_cmd, generate_env_cmd, flatten_shell_script, wrap_in_bash, generate_cmd_su -from kupferbootstrap.exec.file import makedir, root_makedir, root_write_file, write_file +from kupferbootstrap.distro.distro import ( + get_base_distro, + get_kupfer_local, + RepoInfo, +) +from kupferbootstrap.exec.cmd import ( + FileDescriptor, + run_root_cmd, + generate_env_cmd, + flatten_shell_script, + wrap_in_bash, + generate_cmd_su, +) +from kupferbootstrap.exec.file import ( + makedir, + root_makedir, + root_write_file, + write_file, +) from kupferbootstrap.generator import generate_makepkg_conf -from kupferbootstrap.utils import mount, umount, check_findmnt, log_or_exception +from kupferbootstrap.utils import ( + mount, + umount, + check_findmnt, + log_or_exception, +) -from .helpers import BASE_CHROOT_PREFIX, BASIC_MOUNTS, base_chroot_name, make_abs_path +from .helpers import ( + BASE_CHROOT_PREFIX, + BASIC_MOUNTS, + base_chroot_name, + make_abs_path, +) class AbstractChroot(Protocol): @@ -42,7 +68,9 @@ class AbstractChroot(Protocol): ): pass - def initialize(self, reset: bool = False, fail_if_initialized: bool = False): + def initialize( + self, reset: bool = False, fail_if_initialized: bool = False + ): raise NotImplementedError() def activate(self, fail_if_active: bool): @@ -74,17 +102,18 @@ class AbstractChroot(Protocol): def mount_pkgbuilds(self, fail_if_mounted: bool): pass - def try_install_packages(self, packages: list[str], refresh: bool, allow_fail: bool) -> dict[str, Union[int, subprocess.CompletedProcess]]: + def try_install_packages( + self, packages: list[str], refresh: bool, allow_fail: bool + ) -> dict[str, Union[int, subprocess.CompletedProcess]]: pass class Chroot(AbstractChroot): - _copy_base: ClassVar[bool] = False copy_base: bool def __repr__(self): - return f'Chroot({self.name})' + return f"Chroot({self.name})" def __init__( self, @@ -92,36 +121,52 @@ class Chroot(AbstractChroot): arch: Arch, copy_base: Optional[bool] = None, extra_repos: Mapping[str, RepoInfo] = {}, - base_packages: list[str] = ['base', 'base-devel', 'git'], + base_packages: list[str] = ["base", "base-devel", "git"], path_override: Optional[str] = None, ): self.uuid = uuid4() if copy_base is None: - logging.debug(f'{name}: copy_base is none!') - copy_base = (name == base_chroot_name(arch)) + logging.debug(f"{name}: copy_base is none!") + copy_base = name == 
base_chroot_name(arch) self.active = False self.initialized = False self.active_mounts = list[str]() self.name = name self.arch = arch - self.path = path_override or os.path.join(config.get_path('chroots'), name) - self.copy_base = copy_base if copy_base is not None else self._copy_base + self.path = path_override or os.path.join( + config.get_path("chroots"), name + ) + self.copy_base = ( + copy_base if copy_base is not None else self._copy_base + ) self.extra_repos = deepcopy(extra_repos) self.base_packages = base_packages.copy() - if self.name.startswith(BASE_CHROOT_PREFIX) and set(get_kupfer_local(self.arch).repos).intersection(set(self.extra_repos)): - raise Exception(f'Base chroot {self.name} had local repos specified: {self.extra_repos}') + if self.name.startswith(BASE_CHROOT_PREFIX) and set( + get_kupfer_local(self.arch).repos + ).intersection(set(self.extra_repos)): + raise Exception( + f"Base chroot {self.name} had local repos specified: {self.extra_repos}" + ) - def create_rootfs(self, reset: bool, pacman_conf_target: str, active_previously: bool): + def create_rootfs( + self, reset: bool, pacman_conf_target: str, active_previously: bool + ): raise NotImplementedError() - def initialize(self, reset: bool = False, fail_if_initialized: bool = False): - pacman_conf_target = self.get_path('etc/pacman.conf') + def initialize( + self, reset: bool = False, fail_if_initialized: bool = False + ): + pacman_conf_target = self.get_path("etc/pacman.conf") if self.initialized and not reset: # chroot must have been initialized already! if fail_if_initialized: - raise Exception(f"Chroot {self.name} ({self.uuid}) is already initialized, this seems like a bug") - logging.debug(f"Base chroot {self.name} ({self.uuid}) already initialized") + raise Exception( + f"Chroot {self.name} ({self.uuid}) is already initialized, this seems like a bug" + ) + logging.debug( + f"Base chroot {self.name} ({self.uuid}) already initialized" + ) return active_previously = self.active @@ -132,7 +177,7 @@ class Chroot(AbstractChroot): def get_path(self, *joins: str) -> str: if joins: # no need to check for len(joins) > 1 because [1:] will just return [] - joins = (joins[0].lstrip('/'),) + joins[1:] + joins = (joins[0].lstrip("/"),) + joins[1:] return os.path.join(self.path, *joins) @@ -140,7 +185,7 @@ class Chroot(AbstractChroot): self, absolute_source: str, relative_destination: str, - options=['bind'], + options=["bind"], fs_type: Optional[str] = None, fail_if_mounted: bool = True, mkdir: bool = True, @@ -149,26 +194,46 @@ class Chroot(AbstractChroot): """returns the absolute path `relative_target` was mounted at""" def log_or_exc(msg): - log_or_exception(strict_cache_consistency, msg, log_level=logging.ERROR) + log_or_exception( + strict_cache_consistency, msg, log_level=logging.ERROR + ) - relative_destination = relative_destination.lstrip('/') + relative_destination = relative_destination.lstrip("/") absolute_destination = self.get_path(relative_destination) pseudo_absolute = make_abs_path(relative_destination) if check_findmnt(absolute_destination): if pseudo_absolute not in self.active_mounts: - raise Exception(f'{self.name}: We leaked the mount for {pseudo_absolute} ({absolute_destination}).') + raise Exception( + f"{self.name}: We leaked the mount for {pseudo_absolute} ({absolute_destination})." + ) elif fail_if_mounted: - raise Exception(f'{self.name}: {absolute_destination} is already mounted') - logging.debug(f'{self.name}: {absolute_destination} already mounted. 
Skipping.') + raise Exception( + f"{self.name}: {absolute_destination} is already mounted" + ) + logging.debug( + f"{self.name}: {absolute_destination} already mounted. Skipping." + ) else: if pseudo_absolute in self.active_mounts: - log_or_exc(f'{self.name}: Mount {pseudo_absolute} was in active_mounts but not actually mounted. ({absolute_destination})') + log_or_exc( + f"{self.name}: Mount {pseudo_absolute} was in active_mounts but not actually mounted. ({absolute_destination})" + ) if mkdir and os.path.isdir(absolute_source): root_makedir(absolute_destination) - result = mount(absolute_source, absolute_destination, options=options, fs_type=fs_type, register_unmount=False) + result = mount( + absolute_source, + absolute_destination, + options=options, + fs_type=fs_type, + register_unmount=False, + ) if result.returncode != 0: - raise Exception(f'{self.name}: failed to mount {absolute_source} to {absolute_destination}') - logging.debug(f'{self.name}: {absolute_source} successfully mounted to {absolute_destination}.') + raise Exception( + f"{self.name}: failed to mount {absolute_source} to {absolute_destination}" + ) + logging.debug( + f"{self.name}: {absolute_source} successfully mounted to {absolute_destination}." + ) self.active_mounts += [pseudo_absolute] atexit.register(self.deactivate) return absolute_destination @@ -178,7 +243,10 @@ class Chroot(AbstractChroot): return path = self.get_path(relative_path) result = umount(path) - if result.returncode == 0 and make_abs_path(relative_path) in self.active_mounts: + if ( + result.returncode == 0 + and make_abs_path(relative_path) in self.active_mounts + ): self.active_mounts.remove(relative_path) return result @@ -187,20 +255,26 @@ class Chroot(AbstractChroot): mounts = [make_abs_path(path) for path in relative_paths] mounts.sort(reverse=True) for mount in mounts: - if mount == '/proc': + if mount == "/proc": continue self.umount(mount) - if '/proc' in mounts: - self.umount('/proc') + if "/proc" in mounts: + self.umount("/proc") def activate(self, fail_if_active: bool = False): """mount /dev, /sys and /proc""" if self.active and fail_if_active: - raise Exception(f'chroot {self.name} already active!') + raise Exception(f"chroot {self.name} already active!") if not self.initialized: self.initialize(fail_if_initialized=False) for dst, opts in BASIC_MOUNTS.items(): - self.mount(opts['src'], dst, fs_type=opts['type'], options=opts['options'], fail_if_mounted=fail_if_active) + self.mount( + opts["src"], + dst, + fs_type=opts["type"], + options=opts["options"], + fail_if_mounted=fail_if_active, + ) self.active = True def deactivate_core(self): @@ -209,11 +283,21 @@ class Chroot(AbstractChroot): # additional mounts like crossdirect are intentionally left intact. Is such a chroot still `active` afterwards? self.active = False - def deactivate(self, fail_if_inactive: bool = False, ignore_rootfs: bool = False): + def deactivate( + self, fail_if_inactive: bool = False, ignore_rootfs: bool = False + ): if not self.active: if fail_if_inactive: - raise Exception(f"Chroot {self.name} not activated, can't deactivate!") - self.umount_many([mnt for mnt in self.active_mounts if mnt not in ['/', '/boot'] or not ignore_rootfs]) + raise Exception( + f"Chroot {self.name} not activated, can't deactivate!" 
+ ) + self.umount_many( + [ + mnt + for mnt in self.active_mounts + if mnt not in ["/", "/boot"] or not ignore_rootfs + ] + ) self.active = False def run_cmd( @@ -230,38 +314,58 @@ class Chroot(AbstractChroot): switch_user: Optional[str] = None, ) -> Union[int, subprocess.CompletedProcess]: if not self.active and fail_inactive: - raise Exception(f'Chroot {self.name} is inactive, not running command! Hint: pass `fail_inactive=False`') + raise Exception( + f"Chroot {self.name} is inactive, not running command! Hint: pass `fail_inactive=False`" + ) if outer_env is None: outer_env = {} native = config.runtime.arch assert native - if self.arch != native and 'QEMU_LD_PREFIX' not in outer_env: + if self.arch != native and "QEMU_LD_PREFIX" not in outer_env: outer_env = dict(outer_env) # copy dict for modification - outer_env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][self.arch]}'} + outer_env |= { + "QEMU_LD_PREFIX": f"/usr/{GCC_HOSTSPECS[native][self.arch]}" + } env_cmd = generate_env_cmd(inner_env) if inner_env else [] if not isinstance(script, str) and isinstance(script, list): - script = flatten_shell_script(script, shell_quote_items=False, wrap_in_shell_quote=False) + script = flatten_shell_script( + script, shell_quote_items=False, wrap_in_shell_quote=False + ) if cwd: script = f"cd {shell_quote(cwd)} && ( {script} )" if switch_user: - inner_cmd = generate_cmd_su(script, switch_user=switch_user, elevation_method='none', force_su=True) + inner_cmd = generate_cmd_su( + script, + switch_user=switch_user, + elevation_method="none", + force_su=True, + ) else: inner_cmd = wrap_in_bash(script, flatten_result=False) - cmd = flatten_shell_script(['chroot', self.path] + env_cmd + inner_cmd, shell_quote_items=True) + cmd = flatten_shell_script( + ["chroot", self.path] + env_cmd + inner_cmd, shell_quote_items=True + ) - return run_root_cmd(cmd, env=outer_env, attach_tty=attach_tty, capture_output=capture_output, stdout=stdout, stderr=stderr) + return run_root_cmd( + cmd, + env=outer_env, + attach_tty=attach_tty, + capture_output=capture_output, + stdout=stdout, + stderr=stderr, + ) def mount_pkgbuilds(self, fail_if_mounted: bool = False) -> str: return self.mount( - absolute_source=config.get_path('pkgbuilds'), - relative_destination=CHROOT_PATHS['pkgbuilds'].lstrip('/'), + absolute_source=config.get_path("pkgbuilds"), + relative_destination=CHROOT_PATHS["pkgbuilds"].lstrip("/"), fail_if_mounted=fail_if_mounted, ) def mount_pacman_cache(self, fail_if_mounted: bool = False) -> str: - shared_cache = os.path.join(config.get_path('pacman'), self.arch) - rel_target = 'var/cache/pacman/pkg' + shared_cache = os.path.join(config.get_path("pacman"), self.arch) + rel_target = "var/cache/pacman/pkg" makedir(shared_cache) root_makedir(self.get_path(rel_target)) return self.mount( @@ -272,67 +376,95 @@ class Chroot(AbstractChroot): def mount_packages(self, fail_if_mounted: bool = False) -> str: return self.mount( - absolute_source=config.get_path('packages'), - relative_destination=CHROOT_PATHS['packages'].lstrip('/'), + absolute_source=config.get_path("packages"), + relative_destination=CHROOT_PATHS["packages"].lstrip("/"), fail_if_mounted=fail_if_mounted, ) def mount_chroots(self, fail_if_mounted: bool = False) -> str: return self.mount( - absolute_source=config.get_path('chroots'), - relative_destination=CHROOT_PATHS['chroots'].lstrip('/'), + absolute_source=config.get_path("chroots"), + relative_destination=CHROOT_PATHS["chroots"].lstrip("/"), fail_if_mounted=fail_if_mounted, ) - def 
write_makepkg_conf(self, target_arch: Arch, cross_chroot_relative: Optional[str], cross: bool = True) -> str: + def write_makepkg_conf( + self, + target_arch: Arch, + cross_chroot_relative: Optional[str], + cross: bool = True, + ) -> str: """ Generate a `makepkg.conf` or `makepkg_cross_$arch.conf` file in /etc. If `cross` is set makepkg will be configured to crosscompile for the foreign chroot at `cross_chroot_relative` Returns the relative (to `self.path`) path to the written file, e.g. `etc/makepkg_cross_aarch64.conf`. """ - makepkg_cross_conf = generate_makepkg_conf(target_arch, cross=cross, chroot=cross_chroot_relative) - filename = 'makepkg' + (f'_cross_{target_arch}' if cross else '') + '.conf' - makepkg_conf_path_relative = os.path.join('etc', filename) + makepkg_cross_conf = generate_makepkg_conf( + target_arch, cross=cross, chroot=cross_chroot_relative + ) + filename = ( + "makepkg" + (f"_cross_{target_arch}" if cross else "") + ".conf" + ) + makepkg_conf_path_relative = os.path.join("etc", filename) makepkg_conf_path = os.path.join(self.path, makepkg_conf_path_relative) - root_makedir(self.get_path('/etc')) + root_makedir(self.get_path("/etc")) root_write_file(makepkg_conf_path, makepkg_cross_conf) return makepkg_conf_path_relative - def write_pacman_conf(self, check_space: Optional[bool] = None, in_chroot: bool = True, absolute_path: Optional[str] = None): + def write_pacman_conf( + self, + check_space: Optional[bool] = None, + in_chroot: bool = True, + absolute_path: Optional[str] = None, + ): user = None group = None if check_space is None: check_space = config.file.pacman.check_space if not absolute_path: - path = self.get_path('/etc') + path = self.get_path("/etc") root_makedir(path) - absolute_path = os.path.join(path, 'pacman.conf') - user = 'root' - group = 'root' + absolute_path = os.path.join(path, "pacman.conf") + user = "root" + group = "root" repos = deepcopy(self.extra_repos) if not in_chroot: for repo in repos.values(): repo.url_template = repo.url_template.replace( - f'file://{CHROOT_PATHS["packages"]}', - f'file://{config.get_path("packages")}', + f"file://{CHROOT_PATHS['packages']}", + f"file://{config.get_path('packages')}", 1, ) - conf_text = get_base_distro(self.arch).get_pacman_conf(repos, check_space=check_space, in_chroot=in_chroot) + conf_text = get_base_distro(self.arch).get_pacman_conf( + repos, check_space=check_space, in_chroot=in_chroot + ) write_file(absolute_path, conf_text, user=user, group=group) def create_user( self, - user: str = 'kupfer', + user: str = "kupfer", password: Optional[str] = None, - groups: list[str] = ['network', 'video', 'audio', 'optical', 'storage', 'input', 'scanner', 'games', 'lp', 'rfkill', 'wheel'], - primary_group: Optional[str] = 'users', + groups: list[str] = [ + "network", + "video", + "audio", + "optical", + "storage", + "input", + "scanner", + "games", + "lp", + "rfkill", + "wheel", + ], + primary_group: Optional[str] = "users", uid: Optional[int] = None, non_unique: bool = False, ): - user = user or 'kupfer' - uid_param = f'-u {uid}' if uid is not None else '' - unique_param = '--non-unique' if non_unique else '' - pgroup_param = f'-g {primary_group}' if primary_group else '' + user = user or "kupfer" + uid_param = f"-u {uid}" if uid is not None else "" + unique_param = "--non-unique" if non_unique else "" + pgroup_param = f"-g {primary_group}" if primary_group else "" install_script = f''' set -e if ! 
id -u "{user}" >/dev/null 2>&1; then @@ -348,27 +480,51 @@ class Chroot(AbstractChroot): result = self.run_cmd(install_script) assert isinstance(result, subprocess.CompletedProcess) if result.returncode != 0: - raise Exception(f'Failed to setup user {user} in self.name') + raise Exception(f"Failed to setup user {user} in self.name") def get_uid(self, user: Union[str, int]) -> int: if isinstance(user, int): return user - if user == 'root': + if user == "root": return 0 - res = self.run_cmd(['id', '-u', user], capture_output=True) + res = self.run_cmd(["id", "-u", user], capture_output=True) assert isinstance(res, subprocess.CompletedProcess) if res.returncode or not res.stdout: - raise Exception(f"chroot {self.name}: Couldnt detect uid for user {user}: {repr(res.stdout)}") + raise Exception( + f"chroot {self.name}: Couldnt detect uid for user {user}: {repr(res.stdout)}" + ) uid = res.stdout.decode() return int(uid) - def add_sudo_config(self, config_name: str = 'wheel', privilegee: str = '%wheel', password_required: bool = True): - if '.' in config_name: - raise Exception(f"won't create sudoers.d file {config_name} since it will be ignored by sudo because it contains a dot!") - comment = ('# allow ' + (f'members of group {privilegee.strip("%")}' if privilegee.startswith('%') else f'user {privilegee}') + - 'to run any program as root' + ('' if password_required else ' without a password')) - line = privilegee + (' ALL=(ALL:ALL) ALL' if password_required else ' ALL=(ALL) NOPASSWD: ALL') - root_write_file(self.get_path(f'/etc/sudoers.d/{config_name}'), f'{comment}\n{line}') + def add_sudo_config( + self, + config_name: str = "wheel", + privilegee: str = "%wheel", + password_required: bool = True, + ): + if "." in config_name: + raise Exception( + f"won't create sudoers.d file {config_name} since it will be ignored by sudo because it contains a dot!" 
+ ) + comment = ( + "# allow " + + ( + f"members of group {privilegee.strip('%')}" + if privilegee.startswith("%") + else f"user {privilegee}" + ) + + "to run any program as root" + + ("" if password_required else " without a password") + ) + line = privilegee + ( + " ALL=(ALL:ALL) ALL" + if password_required + else " ALL=(ALL) NOPASSWD: ALL" + ) + root_write_file( + self.get_path(f"/etc/sudoers.d/{config_name}"), + f"{comment}\n{line}", + ) def try_install_packages( self, @@ -381,16 +537,18 @@ class Chroot(AbstractChroot): results = {} stderr = sys.stdout if redirect_stderr else sys.stderr if refresh: - results['refresh'] = self.run_cmd('pacman -Syy --noconfirm', stderr=stderr) + results["refresh"] = self.run_cmd( + "pacman -Syy --noconfirm", stderr=stderr + ) cmd = "pacman -S --noconfirm --needed --overwrite='/*'" - result = self.run_cmd(f'{cmd} -y {" ".join(packages)}', stderr=stderr) + result = self.run_cmd(f"{cmd} -y {' '.join(packages)}", stderr=stderr) assert isinstance(result, subprocess.CompletedProcess) results |= {package: result for package in packages} if result.returncode != 0 and allow_fail: results = {} - logging.debug('Falling back to serial installation') + logging.debug("Falling back to serial installation") for pkg in set(packages): - results[pkg] = self.run_cmd(f'{cmd} {pkg}', stderr=stderr) + results[pkg] = self.run_cmd(f"{cmd} {pkg}", stderr=stderr) return results @@ -409,12 +567,12 @@ def get_chroot( global chroots if name not in chroots: chroot = chroot_class(name, **chroot_args) - logging.debug(f'Adding chroot {name} to chroot map: {chroot.uuid}') + logging.debug(f"Adding chroot {name} to chroot map: {chroot.uuid}") chroots[name] = chroot else: existing = chroots[name] if fail_if_exists: - raise Exception(f'chroot {name} already exists: {existing.uuid}') + raise Exception(f"chroot {name} already exists: {existing.uuid}") logging.debug(f"returning existing chroot {name}: {existing.uuid}") assert isinstance(existing, chroot_class) chroot = chroots[name] diff --git a/src/kupferbootstrap/chroot/base.py b/src/kupferbootstrap/chroot/base.py index 1d4bedb..5fa20b7 100644 --- a/src/kupferbootstrap/chroot/base.py +++ b/src/kupferbootstrap/chroot/base.py @@ -16,33 +16,34 @@ from .helpers import base_chroot_name class BaseChroot(Chroot): - _copy_base: ClassVar[bool] = False def create_rootfs(self, reset, pacman_conf_target, active_previously): if reset: - logging.info(f'Resetting {self.name}') - for dir in glob(os.path.join(self.path, '*')): + logging.info(f"Resetting {self.name}") + for dir in glob(os.path.join(self.path, "*")): rmtree(dir) - makedir(config.get_path('chroots')) + makedir(config.get_path("chroots")) root_makedir(self.get_path()) self.write_pacman_conf() self.mount_pacman_cache() - logging.info(f'Pacstrapping chroot {self.name}: {", ".join(self.base_packages)}') + logging.info( + f"Pacstrapping chroot {self.name}: {', '.join(self.base_packages)}" + ) result = run_root_cmd( [ - 'pacstrap', - '-C', + "pacstrap", + "-C", pacman_conf_target, - '-G', + "-G", self.path, *self.base_packages, - '--needed', - '--overwrite=*', - '-yyuu', + "--needed", + "--overwrite=*", + "-yyuu", ], stderr=sys.stdout, ) @@ -54,6 +55,8 @@ class BaseChroot(Chroot): def get_base_chroot(arch: Arch) -> BaseChroot: name = base_chroot_name(arch) args = dict(arch=arch, copy_base=False) - chroot = get_chroot(name, initialize=False, chroot_class=BaseChroot, chroot_args=args) + chroot = get_chroot( + name, initialize=False, chroot_class=BaseChroot, chroot_args=args + ) assert isinstance(chroot, 
BaseChroot) return chroot diff --git a/src/kupferbootstrap/chroot/build.py b/src/kupferbootstrap/chroot/build.py index 745dd0c..1750504 100644 --- a/src/kupferbootstrap/chroot/build.py +++ b/src/kupferbootstrap/chroot/build.py @@ -5,10 +5,21 @@ from glob import glob from typing import ClassVar, Optional from kupferbootstrap.config.state import config -from kupferbootstrap.constants import Arch, GCC_HOSTSPECS, CROSSDIRECT_PKGS, CHROOT_PATHS +from kupferbootstrap.constants import ( + Arch, + GCC_HOSTSPECS, + CROSSDIRECT_PKGS, + CHROOT_PATHS, +) from kupferbootstrap.distro.distro import get_kupfer_local from kupferbootstrap.exec.cmd import run_root_cmd -from kupferbootstrap.exec.file import makedir, remove_file, root_makedir, root_write_file, symlink +from kupferbootstrap.exec.file import ( + makedir, + remove_file, + root_makedir, + root_write_file, + symlink, +) from .abstract import Chroot, get_chroot from .helpers import build_chroot_name @@ -16,53 +27,68 @@ from .base import get_base_chroot class BuildChroot(Chroot): - _copy_base: ClassVar[bool] = True - def create_rootfs(self, reset: bool, pacman_conf_target: str, active_previously: bool): - makedir(config.get_path('chroots')) + def create_rootfs( + self, reset: bool, pacman_conf_target: str, active_previously: bool + ): + makedir(config.get_path("chroots")) root_makedir(self.get_path()) - if reset or not os.path.exists(self.get_path('usr/bin')): + if reset or not os.path.exists(self.get_path("usr/bin")): base_chroot = get_base_chroot(self.arch) if base_chroot == self: - raise Exception('base_chroot == self, bailing out. this is a bug') + raise Exception( + "base_chroot == self, bailing out. this is a bug" + ) base_chroot.initialize() - logging.info(f'Copying {base_chroot.name} chroot to {self.name}') - cmd = ['rsync', '-a', '--delete', '-q', '-W', '-x'] + logging.info(f"Copying {base_chroot.name} chroot to {self.name}") + cmd = ["rsync", "-a", "--delete", "-q", "-W", "-x"] for mountpoint in CHROOT_PATHS.values(): - cmd += ['--exclude', mountpoint.rstrip('/')] - cmd += [f'{base_chroot.path}/', f'{self.path}/'] + cmd += ["--exclude", mountpoint.rstrip("/")] + cmd += [f"{base_chroot.path}/", f"{self.path}/"] logging.debug(f"running rsync: {cmd}") result = run_root_cmd(cmd) if result.returncode != 0: - raise Exception(f'Failed to copy {base_chroot.name} to {self.name}') + raise Exception( + f"Failed to copy {base_chroot.name} to {self.name}" + ) else: - logging.debug(f'{self.name}: Reusing existing installation') + logging.debug(f"{self.name}: Reusing existing installation") - if set(get_kupfer_local(self.arch).repos).intersection(set(self.extra_repos)): + if set(get_kupfer_local(self.arch).repos).intersection( + set(self.extra_repos) + ): self.mount_packages() self.mount_pacman_cache() self.write_pacman_conf() self.initialized = True self.activate() - self.try_install_packages(self.base_packages, refresh=True, allow_fail=False) + self.try_install_packages( + self.base_packages, refresh=True, allow_fail=False + ) self.deactivate_core() # patch makepkg - with open(self.get_path('/usr/bin/makepkg'), 'r') as file: + with open(self.get_path("/usr/bin/makepkg"), "r") as file: data = file.read() - data = data.replace('EUID == 0', 'EUID == -1') - root_write_file(self.get_path('/usr/bin/makepkg'), data) + data = data.replace("EUID == 0", "EUID == -1") + root_write_file(self.get_path("/usr/bin/makepkg"), data) # configure makepkg - self.write_makepkg_conf(self.arch, cross_chroot_relative=None, cross=False) + self.write_makepkg_conf( + self.arch, 
cross_chroot_relative=None, cross=False
+        )
 
         if active_previously:
             self.activate()
 
-    def mount_crossdirect(self, native_chroot: Optional[Chroot] = None, fail_if_mounted: bool = False):
+    def mount_crossdirect(
+        self,
+        native_chroot: Optional[Chroot] = None,
+        fail_if_mounted: bool = False,
+    ):
         """
         mount `native_chroot` at `target_chroot`/native
         returns the absolute path that `native_chroot` has been mounted at.
@@ -73,69 +99,94 @@ class BuildChroot(Chroot):
             native_chroot = get_build_chroot(config.runtime.arch)
         host_arch = native_chroot.arch
         hostspec = GCC_HOSTSPECS[host_arch][target_arch]
-        cc = f'{hostspec}-cc'
-        gcc = f'{hostspec}-gcc'
+        cc = f"{hostspec}-cc"
+        gcc = f"{hostspec}-gcc"
 
-        native_mount = os.path.join(self.path, 'native')
-        logging.debug(f'Activating crossdirect in {native_mount}')
+        native_mount = os.path.join(self.path, "native")
+        logging.debug(f"Activating crossdirect in {native_mount}")
         native_chroot.initialize()
         native_chroot.mount_pacman_cache()
         native_chroot.mount_packages()
         native_chroot.activate()
         logging.debug(f"Installing {CROSSDIRECT_PKGS=} + {gcc=}")
-        results = dict(native_chroot.try_install_packages(
-            CROSSDIRECT_PKGS + [gcc],
-            refresh=True,
-            allow_fail=False,
-        ),)
+        results = dict(
+            native_chroot.try_install_packages(
+                CROSSDIRECT_PKGS + [gcc],
+                refresh=True,
+                allow_fail=False,
+            ),
+        )
         res_gcc = results[gcc]
-        res_crossdirect = results['crossdirect']
+        res_crossdirect = results["crossdirect"]
         assert isinstance(res_gcc, subprocess.CompletedProcess)
         assert isinstance(res_crossdirect, subprocess.CompletedProcess)
         if res_gcc.returncode != 0:
-            logging.debug('Failed to install cross-compiler package {gcc}')
+            logging.debug(f"Failed to install cross-compiler package {gcc}")
         if res_crossdirect.returncode != 0:
-            raise Exception('Failed to install crossdirect')
+            raise Exception("Failed to install crossdirect")
 
-        cc_path = os.path.join(native_chroot.path, 'usr', 'bin', cc)
-        target_lib_dir = os.path.join(self.path, 'lib64')
+        cc_path = os.path.join(native_chroot.path, "usr", "bin", cc)
+        target_lib_dir = os.path.join(self.path, "lib64")
         # TODO: crosscompiler weirdness, find proper fix for /include instead of /usr/include
-        target_include_dir = os.path.join(self.path, 'include')
+        target_include_dir = os.path.join(self.path, "include")
 
-        for target, source in {cc_path: gcc, target_lib_dir: 'lib', target_include_dir: 'usr/include'}.items():
+        for target, source in {
+            cc_path: gcc,
+            target_lib_dir: "lib",
+            target_include_dir: "usr/include",
+        }.items():
             if not (os.path.exists(target) or os.path.islink(target)):
-                logging.debug(f'Symlinking {source=} at {target=}')
+                logging.debug(f"Symlinking {source=} at {target=}")
                 symlink(source, target)
-        ld_so = os.path.basename(glob(f"{os.path.join(native_chroot.path, 'usr', 'lib', 'ld-linux-')}*")[0])
+        ld_so = os.path.basename(
+            glob(
+                f"{os.path.join(native_chroot.path, 'usr', 'lib', 'ld-linux-')}*"
+            )[0]
+        )
         ld_so_target = os.path.join(target_lib_dir, ld_so)
         if not os.path.islink(ld_so_target):
-            symlink(os.path.join('/native', 'usr', 'lib', ld_so), ld_so_target)
+            symlink(os.path.join("/native", "usr", "lib", ld_so), ld_so_target)
         else:
-            logging.debug(f'ld-linux.so symlink already exists, skipping for {self.name}')
+            logging.debug(
+                f"ld-linux.so symlink already exists, skipping for {self.name}"
+            )
 
         # TODO: find proper fix
-        rustc = os.path.join(native_chroot.path, 'usr/lib/crossdirect', target_arch, 'rustc')
+        rustc = os.path.join(
+            native_chroot.path, "usr/lib/crossdirect", target_arch, "rustc"
+        )
         if 
os.path.exists(rustc): - logging.debug('Disabling crossdirect rustc') + logging.debug("Disabling crossdirect rustc") remove_file(rustc) root_makedir(native_mount) - logging.debug(f'Mounting {native_chroot.name} to {native_mount}') - self.mount(native_chroot.path, 'native', fail_if_mounted=fail_if_mounted) + logging.debug(f"Mounting {native_chroot.name} to {native_mount}") + self.mount( + native_chroot.path, "native", fail_if_mounted=fail_if_mounted + ) return native_mount - def mount_crosscompile(self, foreign_chroot: Chroot, fail_if_mounted: bool = False): - mount_dest = os.path.join(CHROOT_PATHS['chroots'].lstrip('/'), os.path.basename(foreign_chroot.path)) + def mount_crosscompile( + self, foreign_chroot: Chroot, fail_if_mounted: bool = False + ): + mount_dest = os.path.join( + CHROOT_PATHS["chroots"].lstrip("/"), + os.path.basename(foreign_chroot.path), + ) return self.mount( absolute_source=foreign_chroot.path, relative_destination=mount_dest, fail_if_mounted=fail_if_mounted, ) - def mount_ccache(self, user: str = 'kupfer', fail_if_mounted: bool = False): - mount_source = os.path.join(config.get_path('ccache'), self.arch) - mount_dest = os.path.join(f'/home/{user}' if user != 'root' else '/root', '.ccache') + def mount_ccache( + self, user: str = "kupfer", fail_if_mounted: bool = False + ): + mount_source = os.path.join(config.get_path("ccache"), self.arch) + mount_dest = os.path.join( + f"/home/{user}" if user != "root" else "/root", ".ccache" + ) uid = self.get_uid(user) makedir(mount_source, user=uid) return self.mount( @@ -144,28 +195,42 @@ class BuildChroot(Chroot): fail_if_mounted=fail_if_mounted, ) - def mount_rust(self, user: str = 'kupfer', fail_if_mounted: bool = False) -> list[str]: + def mount_rust( + self, user: str = "kupfer", fail_if_mounted: bool = False + ) -> list[str]: results = [] uid = self.get_uid(user) - mount_source_base = config.get_path('rust') # apparently arch-agnostic - for rust_dir in ['cargo', 'rustup']: + mount_source_base = config.get_path("rust") # apparently arch-agnostic + for rust_dir in ["cargo", "rustup"]: mount_source = os.path.join(mount_source_base, rust_dir) - mount_dest = os.path.join(f'/home/{user}' if user != 'root' else '/root', f'.{rust_dir}') + mount_dest = os.path.join( + f"/home/{user}" if user != "root" else "/root", f".{rust_dir}" + ) makedir(mount_source, user=uid) - results.append(self.mount( - absolute_source=mount_source, - relative_destination=mount_dest, - fail_if_mounted=fail_if_mounted, - )) + results.append( + self.mount( + absolute_source=mount_source, + relative_destination=mount_dest, + fail_if_mounted=fail_if_mounted, + ) + ) return results -def get_build_chroot(arch: Arch, add_kupfer_repos: bool = True, **kwargs) -> BuildChroot: +def get_build_chroot( + arch: Arch, add_kupfer_repos: bool = True, **kwargs +) -> BuildChroot: name = build_chroot_name(arch) - if 'extra_repos' in kwargs: - raise Exception('extra_repos!') + if "extra_repos" in kwargs: + raise Exception("extra_repos!") repos = get_kupfer_local(arch).repos if add_kupfer_repos else {} args = dict(arch=arch) - chroot = get_chroot(name, **kwargs, extra_repos=repos, chroot_class=BuildChroot, chroot_args=args) + chroot = get_chroot( + name, + **kwargs, + extra_repos=repos, + chroot_class=BuildChroot, + chroot_args=args, + ) assert isinstance(chroot, BuildChroot) return chroot diff --git a/src/kupferbootstrap/chroot/cli.py b/src/kupferbootstrap/chroot/cli.py index 187cc06..ec713a4 100644 --- a/src/kupferbootstrap/chroot/cli.py +++ b/src/kupferbootstrap/chroot/cli.py 
@@ -12,25 +12,33 @@ from .abstract import Chroot from .base import get_base_chroot from .build import get_build_chroot, BuildChroot -CHROOT_TYPES = ['base', 'build', 'rootfs'] +CHROOT_TYPES = ["base", "build", "rootfs"] -@click.command('chroot') -@click.argument('type', required=False, type=click.Choice(CHROOT_TYPES), default='build') +@click.command("chroot") @click.argument( - 'name', + "type", required=False, type=click.Choice(CHROOT_TYPES), default="build" +) +@click.argument( + "name", required=False, default=None, ) @click.pass_context -def cmd_chroot(ctx: click.Context, type: str = 'build', name: Optional[str] = None, enable_crossdirect=True): +def cmd_chroot( + ctx: click.Context, + type: str = "build", + name: Optional[str] = None, + enable_crossdirect=True, +): """Open a shell in a chroot. For rootfs NAME is a profile name, for others the architecture (e.g. aarch64).""" if type not in CHROOT_TYPES: raise Exception(f'Unknown chroot type: "{type}"') - if type == 'rootfs': + if type == "rootfs": from ..image.image import cmd_inspect + assert isinstance(cmd_inspect, click.Command) ctx.invoke(cmd_inspect, profile=name, shell=True) return @@ -42,26 +50,30 @@ def cmd_chroot(ctx: click.Context, type: str = 'build', name: Optional[str] = No if not arch: arch = get_profile_device().arch assert arch - if type == 'base': + if type == "base": chroot = get_base_chroot(arch) - if not os.path.exists(chroot.get_path('/bin')): + if not os.path.exists(chroot.get_path("/bin")): chroot.initialize() chroot.initialized = True - elif type == 'build': + elif type == "build": build_chroot: BuildChroot = get_build_chroot(arch, activate=True) chroot = build_chroot # type safety - if not os.path.exists(build_chroot.get_path('/bin')): + if not os.path.exists(build_chroot.get_path("/bin")): build_chroot.initialize() build_chroot.initialized = True build_chroot.mount_pkgbuilds() build_chroot.mount_chroots() assert arch and config.runtime.arch - if config.file.build.crossdirect and enable_crossdirect and arch != config.runtime.arch: + if ( + config.file.build.crossdirect + and enable_crossdirect + and arch != config.runtime.arch + ): build_chroot.mount_crossdirect() else: - raise Exception('Really weird bug') + raise Exception("Really weird bug") chroot.mount_packages() chroot.activate() - logging.debug(f'Starting shell in {chroot.name}:') - chroot.run_cmd('bash', attach_tty=True) + logging.debug(f"Starting shell in {chroot.name}:") + chroot.run_cmd("bash", attach_tty=True) diff --git a/src/kupferbootstrap/chroot/device.py b/src/kupferbootstrap/chroot/device.py index 56de30b..0868e52 100644 --- a/src/kupferbootstrap/chroot/device.py +++ b/src/kupferbootstrap/chroot/device.py @@ -16,38 +16,62 @@ from .abstract import get_chroot class DeviceChroot(BuildChroot): - _copy_base: ClassVar[bool] = False def create_rootfs(self, reset, pacman_conf_target, active_previously): - makedir(config.get_path('chroots')) + makedir(config.get_path("chroots")) root_makedir(self.get_path()) if not self.copy_base: - pacman_conf_target = os.path.join(get_temp_dir(register_cleanup=True), f'pacman-{self.name}.conf') - self.write_pacman_conf(in_chroot=False, absolute_path=pacman_conf_target) - BaseChroot.create_rootfs(cast(BaseChroot, self), reset, pacman_conf_target, active_previously) + pacman_conf_target = os.path.join( + get_temp_dir(register_cleanup=True), f"pacman-{self.name}.conf" + ) + self.write_pacman_conf( + in_chroot=False, absolute_path=pacman_conf_target + ) + BaseChroot.create_rootfs( + cast(BaseChroot, self), + reset, + 
pacman_conf_target, + active_previously, + ) else: - BuildChroot.create_rootfs(self, reset, pacman_conf_target, active_previously) + BuildChroot.create_rootfs( + self, reset, pacman_conf_target, active_previously + ) - def mount_rootfs(self, source_path: str, fs_type: Optional[str] = None, options: list[str] = [], allow_overlay: bool = False): + def mount_rootfs( + self, + source_path: str, + fs_type: Optional[str] = None, + options: list[str] = [], + allow_overlay: bool = False, + ): if self.active: - raise Exception(f'{self.name}: Chroot is marked as active, not mounting a rootfs over it.') + raise Exception( + f"{self.name}: Chroot is marked as active, not mounting a rootfs over it." + ) if not os.path.exists(source_path): - raise Exception('Source does not exist') + raise Exception("Source does not exist") if not allow_overlay: really_active = [] for mnt in self.active_mounts: if check_findmnt(self.get_path(mnt)): really_active.append(mnt) if really_active: - raise Exception(f'{self.name}: Chroot has submounts active: {really_active}') + raise Exception( + f"{self.name}: Chroot has submounts active: {really_active}" + ) if os.path.ismount(self.path): - raise Exception(f'{self.name}: There is already something mounted at {self.path}, not mounting over it.') - if os.path.exists(os.path.join(self.path, 'usr/bin')): - raise Exception(f'{self.name}: {self.path}/usr/bin exists, not mounting over existing rootfs.') + raise Exception( + f"{self.name}: There is already something mounted at {self.path}, not mounting over it." + ) + if os.path.exists(os.path.join(self.path, "usr/bin")): + raise Exception( + f"{self.name}: {self.path}/usr/bin exists, not mounting over existing rootfs." + ) makedir(self.path) atexit.register(self.deactivate) - self.mount(source_path, '/', fs_type=fs_type, options=options) + self.mount(source_path, "/", fs_type=fs_type, options=options) def get_device_chroot( @@ -59,12 +83,22 @@ def get_device_chroot( extra_repos: Optional[dict[str, RepoInfo]] = None, **kwargs, ) -> DeviceChroot: - name = f'rootfs_{device}-{flavour}' - repos: dict[str, RepoInfo] = get_kupfer_local(arch).repos if use_local_repos else get_kupfer_https(arch).repos # type: ignore + name = f"rootfs_{device}-{flavour}" + repos: dict[str, RepoInfo] = ( + get_kupfer_local(arch).repos + if use_local_repos + else get_kupfer_https(arch).repos + ) # type: ignore repos.update(extra_repos or {}) args = dict(arch=arch, base_packages=packages, extra_repos=repos) - chroot = get_chroot(name, **kwargs, extra_repos=repos, chroot_class=DeviceChroot, chroot_args=args) + chroot = get_chroot( + name, + **kwargs, + extra_repos=repos, + chroot_class=DeviceChroot, + chroot_args=args, + ) assert isinstance(chroot, DeviceChroot) return chroot diff --git a/src/kupferbootstrap/chroot/helpers.py b/src/kupferbootstrap/chroot/helpers.py index 18bb982..c1ce273 100644 --- a/src/kupferbootstrap/chroot/helpers.py +++ b/src/kupferbootstrap/chroot/helpers.py @@ -4,9 +4,9 @@ from typing import Optional, TypedDict from kupferbootstrap.config.state import config from kupferbootstrap.constants import Arch -BIND_BUILD_DIRS = 'BINDBUILDDIRS' -BASE_CHROOT_PREFIX = 'base_' -BUILD_CHROOT_PREFIX = 'build_' +BIND_BUILD_DIRS = "BINDBUILDDIRS" +BASE_CHROOT_PREFIX = "base_" +BUILD_CHROOT_PREFIX = "build_" class MountEntry(TypedDict): @@ -18,51 +18,57 @@ class MountEntry(TypedDict): # inspired by arch-chroot # order of these matters! 
BASIC_MOUNTS: dict[str, MountEntry] = { - '/proc': { - 'src': 'proc', - 'type': 'proc', - 'options': ['nosuid,noexec,nodev'] + "/proc": { + "src": "proc", + "type": "proc", + "options": ["nosuid,noexec,nodev"], }, - '/sys': { - 'src': 'sys', - 'type': 'sysfs', - 'options': ['nosuid,noexec,nodev,ro'], + "/sys": { + "src": "sys", + "type": "sysfs", + "options": ["nosuid,noexec,nodev,ro"], }, - '/dev': { - 'src': 'udev', - 'type': 'devtmpfs', - 'options': ['mode=0755,nosuid'], + "/dev": { + "src": "udev", + "type": "devtmpfs", + "options": ["mode=0755,nosuid"], }, - '/dev/pts': { - 'src': 'devpts', - 'type': 'devpts', - 'options': ['mode=0620,gid=5,nosuid,noexec'], + "/dev/pts": { + "src": "devpts", + "type": "devpts", + "options": ["mode=0620,gid=5,nosuid,noexec"], }, - '/dev/shm': { - 'src': 'shm', - 'type': 'tmpfs', - 'options': ['mode=1777,nosuid,nodev'], + "/dev/shm": { + "src": "shm", + "type": "tmpfs", + "options": ["mode=1777,nosuid,nodev"], }, - '/run': { - 'src': '/run', - 'type': 'tmpfs', - 'options': ['bind'], + "/run": { + "src": "/run", + "type": "tmpfs", + "options": ["bind"], }, - '/etc/resolv.conf': { - 'src': os.path.realpath('/etc/resolv.conf'), - 'type': None, - 'options': ['bind'], + "/etc/resolv.conf": { + "src": os.path.realpath("/etc/resolv.conf"), + "type": None, + "options": ["bind"], }, } def make_abs_path(path: str) -> str: """Simply ensures the path string starts with a '/'. Does no disk modifications!""" - return '/' + path.lstrip('/') + return "/" + path.lstrip("/") -def get_chroot_path(chroot_name, override_basepath: Optional[str] = None) -> str: - base_path = config.get_path('chroots') if not override_basepath else override_basepath +def get_chroot_path( + chroot_name, override_basepath: Optional[str] = None +) -> str: + base_path = ( + config.get_path("chroots") + if not override_basepath + else override_basepath + ) return os.path.join(base_path, chroot_name) diff --git a/src/kupferbootstrap/config/cli.py b/src/kupferbootstrap/config/cli.py index 0204ad3..779f19e 100644 --- a/src/kupferbootstrap/config/cli.py +++ b/src/kupferbootstrap/config/cli.py @@ -6,24 +6,33 @@ from typing import Any, Callable, Iterable, Mapping, Optional, Union from kupferbootstrap.devices.device import get_devices, sanitize_device_name from kupferbootstrap.flavours.flavour import get_flavours -from kupferbootstrap.utils import color_bold, colors_supported, color_mark_selected +from kupferbootstrap.utils import ( + color_bold, + colors_supported, + color_mark_selected, +) from kupferbootstrap.wrapper import execute_without_exit from .scheme import Profile -from .profile import PROFILE_EMPTY, PROFILE_DEFAULTS, resolve_profile_attr, SparseProfile +from .profile import ( + PROFILE_EMPTY, + PROFILE_DEFAULTS, + resolve_profile_attr, + SparseProfile, +) from .state import config, CONFIG_DEFAULTS, CONFIG_SECTIONS, merge_configs -def list_to_comma_str(str_list: list[str], default='') -> str: +def list_to_comma_str(str_list: list[str], default="") -> str: if str_list is None: return default - return ','.join(str_list) + return ",".join(str_list) def comma_str_to_list(s: str, default=None) -> list[str]: if not s: return default - return [a for a in s.split(',') if a] + return [a for a in s.split(",") if a] def prompt_config( @@ -44,19 +53,23 @@ def prompt_config( def true_or_zero(to_check) -> bool: """returns true if the value is truthy or int(0)""" zero = 0 # compiler complains about 'is with literal' otherwise - return to_check or to_check is zero # can't do == due to boolean<->int casting + 
return ( + to_check or to_check is zero + ) # can't do == due to boolean<->int casting if type(None) == field_type: field_type = str if field_type == dict: - raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap') + raise Exception( + "Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap" + ) elif field_type == list: default = list_to_comma_str(default) value_conv = comma_str_to_list else: value_conv = None - default = '' if default is None else default + default = "" if default is None else default if bold: text = click.style(text, bold=True) @@ -69,7 +82,9 @@ def prompt_config( show_default=True, show_choices=show_choices, ) # type: ignore - changed = result != (original_default if field_type == list else default) and (true_or_zero(default) or true_or_zero(result)) + changed = result != ( + original_default if field_type == list else default + ) and (true_or_zero(default) or true_or_zero(result)) if changed and echo_changes: print(f'value changed: "{text}" = "{result}"') return result, changed @@ -82,33 +97,37 @@ def prompt_profile( no_parse: bool = True, ) -> tuple[Profile, bool]: """Prompts the user for every field in `defaults`. Set values to None for an empty profile.""" - PARSEABLE_FIELDS = ['device', 'flavour'] + PARSEABLE_FIELDS = ["device", "flavour"] profile: Any = PROFILE_EMPTY | defaults - if name == 'current': + if name == "current": raise Exception("profile name 'current' not allowed") # don't use get_profile() here because we need the sparse profile if name in config.file.profiles: logging.debug(f"Merging with existing profile config for {name}") profile |= config.file.profiles[name] elif create: - logging.info(f"Profile {name} doesn't exist yet, creating new profile.") + logging.info( + f"Profile {name} doesn't exist yet, creating new profile." + ) else: raise Exception(f'Unknown profile "{name}"') logging.info(f'Configuring profile "{name}"') changed = False for key, current in profile.items(): current = profile[key] - text = f'profiles.{name}.{key}' + text = f"profiles.{name}.{key}" if not no_parse and key in PARSEABLE_FIELDS: parse_prompt = None sanitize_func = None - if key == 'device': + if key == "device": parse_prompt = prompt_profile_device sanitize_func = sanitize_device_name - elif key == 'flavour': + elif key == "flavour": parse_prompt = prompt_profile_flavour else: - raise Exception(f'config: Unhandled parseable field {key}, this is a bug in kupferbootstrap.') + raise Exception( + f"config: Unhandled parseable field {key}, this is a bug in kupferbootstrap." 
+ ) result, _changed = parse_prompt( current=current, profile_name=name, @@ -117,17 +136,32 @@ def prompt_profile( sanitize_func=sanitize_func, ) # type: ignore else: - result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key])) # type: ignore + result, _changed = prompt_config( + text=text, + default=current, + field_type=type(PROFILE_DEFAULTS[key]), + ) # type: ignore if _changed: profile[key] = result changed = True return profile, changed -def prompt_choice(current: Optional[Any], key: str, choices: Iterable[Any], allow_none: bool = True, show_choices: bool = False) -> tuple[Any, bool]: - choices = list(choices) + ([''] if allow_none else []) - res, _ = prompt_config(text=key, default=current, field_type=click.Choice(choices), show_choices=show_choices) - if allow_none and res == '': +def prompt_choice( + current: Optional[Any], + key: str, + choices: Iterable[Any], + allow_none: bool = True, + show_choices: bool = False, +) -> tuple[Any, bool]: + choices = list(choices) + ([""] if allow_none else []) + res, _ = prompt_config( + text=key, + default=current, + field_type=click.Choice(choices), + show_choices=show_choices, + ) + if allow_none and res == "": res = None return res, res != current @@ -152,49 +186,74 @@ def prompt_wrappable( ) -> tuple[str, bool]: use_colors = colors_supported(use_colors) - print(color_bold(f"Pick your {attr_name}!\nThese are the available choices:", use_colors=use_colors)) + print( + color_bold( + f"Pick your {attr_name}!\nThese are the available choices:", + use_colors=use_colors, + ) + ) items = execute_without_exit(native_cmd, cli_cmd) if items is None: - logging.warning("(wrapper mode, input for this field will not be checked for correctness)") - return prompt_config(text=f'profiles.{profile_name}.{attr_name}', default=current) - selected, inherited_from = resolve_profile_field(current, profile_name, attr_name, sparse_profiles) + logging.warning( + "(wrapper mode, input for this field will not be checked for correctness)" + ) + return prompt_config( + text=f"profiles.{profile_name}.{attr_name}", default=current + ) + selected, inherited_from = resolve_profile_field( + current, profile_name, attr_name, sparse_profiles + ) if selected and sanitize_func: selected = sanitize_func(selected) for key in sorted(items.keys()): text = items[key].nice_str(newlines=True, colors=use_colors) if key == selected: text = color_mark_selected(text, profile_name, inherited_from) - print(text + '\n') - return prompt_choice(current, f'profiles.{profile_name}.{attr_name}', items.keys()) + print(text + "\n") + return prompt_choice( + current, f"profiles.{profile_name}.{attr_name}", items.keys() + ) def prompt_profile_device(*kargs, **kwargs) -> tuple[str, bool]: - return prompt_wrappable('device', get_devices, ['devices'], *kargs, **kwargs) + return prompt_wrappable( + "device", get_devices, ["devices"], *kargs, **kwargs + ) def prompt_profile_flavour(*kargs, **kwargs) -> tuple[str, bool]: - return prompt_wrappable('flavour', get_flavours, ['flavours'], *kargs, **kwargs) + return prompt_wrappable( + "flavour", get_flavours, ["flavours"], *kargs, **kwargs + ) -def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any: +def config_dot_name_get( + name: str, config: dict[str, Any], prefix: str = "" +) -> Any: if not isinstance(config, dict): - raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}") - split_name = name.split('.') + raise Exception( + f"Couldn't resolve config 
name: passed config is not a dict: {repr(config)}"
+        )
+    split_name = name.split(".")
     name = split_name[0]
     if name not in config:
-        raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
+        raise Exception(
+            f"Couldn't resolve config name: key {prefix + name} not found"
+        )
     value = config[name]
     if len(split_name) == 1:
         return value
     else:
-        rest_name = '.'.join(split_name[1:])
-        return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')
+        rest_name = ".".join(split_name[1:])
+        return config_dot_name_get(
+            name=rest_name, config=value, prefix=prefix + name + "."
+        )
 
 
 def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
-    split_name = name.split('.')
+    split_name = name.split(".")
     if len(split_name) > 1:
-        config = config_dot_name_get('.'.join(split_name[:-1]), config)
+        config = config_dot_name_get(".".join(split_name[:-1]), config)
     config[split_name[-1]] = value
 
@@ -206,11 +265,17 @@ def prompt_for_save(retry_ctx: Optional[click.Context] = None):
     False will still be returned as the retry is expected to either save, perform another retry or abort.
     """
     from ..wrapper import is_wrapped
+
-    if click.confirm(f'Do you want to save your changes to {config.runtime.config_file}?', default=True):
+    if click.confirm(
+        f"Do you want to save your changes to {config.runtime.config_file}?",
+        default=True,
+    ):
         if is_wrapped():
-            logging.warning("Writing to config file inside wrapper."
-                            "This is pointless and probably a bug."
-                            "Your host config file will not be modified.")
+            logging.warning(
+                "Writing to config file inside wrapper."
+                "This is pointless and probably a bug."
+                "Your host config file will not be modified."
+            )
         return True
     if retry_ctx:
         if click.confirm('Retry? ("n" to quit without saving)', default=True):
@@ -219,32 +284,40 @@
 
 
 config_option = click.option(
-    '-C',
-    '--config',
-    'config_file',
-    help='Override path to config file',
+    "-C",
+    "--config",
+    "config_file",
+    help="Override path to config file",
 )
 
 
-@click.group(name='config')
+@click.group(name="config")
 def cmd_config():
     """Manage the configuration and -profiles"""
 
 
-noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
-noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)
-noparse_flag = click.option('--no-parse', help="Don't search PKGBUILDs for devices and flavours", is_flag=True)
+noninteractive_flag = click.option("-N", "--non-interactive", is_flag=True)
+noop_flag = click.option(
+    "--noop", "-n", help="Don't write changes to file", is_flag=True
+)
+noparse_flag = click.option(
+    "--no-parse",
+    help="Don't search PKGBUILDs for devices and flavours",
+    is_flag=True,
+)
 
-CONFIG_MSG = ("Leave fields empty to leave them at their currently displayed value.")
+CONFIG_MSG = (
+    "Leave fields empty to leave them at their currently displayed value."
+) -@cmd_config.command(name='init') +@cmd_config.command(name="init") @noninteractive_flag @noop_flag @noparse_flag @click.option( - '--sections', - '-s', + "--sections", + "-s", multiple=True, type=click.Choice(CONFIG_SECTIONS), default=CONFIG_SECTIONS, @@ -264,14 +337,18 @@ def cmd_config_init( results: dict[str, dict] = {} for section in sections: if section not in CONFIG_SECTIONS: - raise Exception(f'Unknown section: {section}') - if section == 'profiles': + raise Exception(f"Unknown section: {section}") + if section == "profiles": continue results[section] = {} for key, current in config.file[section].items(): - text = f'{section}.{key}' - result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key])) + text = f"{section}.{key}" + result, changed = prompt_config( + text=text, + default=current, + field_type=type(CONFIG_DEFAULTS[section][key]), + ) if changed: results[section][key] = result @@ -282,11 +359,19 @@ def cmd_config_init( config.write() else: return - if 'profiles' in sections: + if "profiles" in sections: print("Configuring profiles") - current_profile = 'default' if 'current' not in config.file.profiles else config.file.profiles.current - new_current, _ = prompt_config('profiles.current', default=current_profile, field_type=str) - profile, changed = prompt_profile(new_current, create=True, no_parse=no_parse) + current_profile = ( + "default" + if "current" not in config.file.profiles + else config.file.profiles.current + ) + new_current, _ = prompt_config( + "profiles.current", default=current_profile, field_type=str + ) + profile, changed = prompt_profile( + new_current, create=True, no_parse=no_parse + ) config.update_profile(new_current, profile) if not noop: if not prompt_for_save(ctx): @@ -295,16 +380,24 @@ def cmd_config_init( if not noop: config.write() else: - logging.info(f'--noop passed, not writing to {config.runtime.config_file}!') + logging.info( + f"--noop passed, not writing to {config.runtime.config_file}!" + ) -@cmd_config.command(name='set') +@cmd_config.command(name="set") @noninteractive_flag @noop_flag @noparse_flag -@click.argument('key_vals', nargs=-1) +@click.argument("key_vals", nargs=-1) @click.pass_context -def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop: bool = False, no_parse: bool = False): +def cmd_config_set( + ctx, + key_vals: list[str], + non_interactive: bool = False, + noop: bool = False, + no_parse: bool = False, +): """ Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers, like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively. 
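The dot-separated identifiers that `config set`/`config get` accept are resolved by the `config_dot_name_get` and `config_dot_name_set` helpers shown above. As a rough standalone sketch of that lookup, assuming a plain nested dict (`dot_get` and `conf` are hypothetical names, not part of this patch):

    def dot_get(name: str, conf: dict, prefix: str = ""):
        # Split off the first path segment and recurse into the subtable.
        head, _, rest = name.partition(".")
        if head not in conf:
            raise KeyError(f"key {prefix + head} not found")
        return dot_get(rest, conf[head], prefix + head + ".") if rest else conf[head]

    conf = {"build": {"clean_mode": True}}
    assert dot_get("build.clean_mode", conf) is True

So `kupferbootstrap config set build.clean_mode=false` walks to the `build` subtable and assigns its `clean_mode` key.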
@@ -313,7 +406,7 @@ def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop logging.info(CONFIG_MSG) config_copy = deepcopy(config.file) for pair in key_vals: - split_pair = pair.split('=') + split_pair = pair.split("=") if len(split_pair) == 2: key: str = split_pair[0] value: Any = split_pair[1] @@ -326,12 +419,20 @@ def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop key = split_pair[0] value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS)) current = config_dot_name_get(key, config.file) - value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False) + value, _ = prompt_config( + text=key, + default=current, + field_type=value_type, + echo_changes=False, + ) else: raise Exception(f'Invalid key=value pair "{pair}"') - print('%s = %s' % (key, value)) + print("%s = %s" % (key, value)) config_dot_name_set(key, value, config_copy) - if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy: + if ( + merge_configs(config_copy, warn_missing_defaultprofile=False) + != config_copy + ): raise Exception('Config "{key}" = "{value}" failed to evaluate') if not noop: if not non_interactive and not prompt_for_save(ctx): @@ -340,8 +441,8 @@ def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop config.write() -@cmd_config.command(name='get') -@click.argument('keys', nargs=-1) +@cmd_config.command(name="get") +@click.argument("keys", nargs=-1) def cmd_config_get(keys: list[str]): """Get config entries. Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`""" @@ -349,24 +450,30 @@ def cmd_config_get(keys: list[str]): print(config_dot_name_get(keys[0], config.file)) return for key in keys: - print('%s = %s' % (key, config_dot_name_get(key, config.file))) + print("%s = %s" % (key, config_dot_name_get(key, config.file))) -@cmd_config.group(name='profile') +@cmd_config.group(name="profile") def cmd_profile(): """Manage config profiles""" -@cmd_profile.command(name='init') +@cmd_profile.command(name="init") @noninteractive_flag @noop_flag @noparse_flag -@click.argument('name', required=False) +@click.argument("name", required=False) @click.pass_context -def cmd_profile_init(ctx, name: Optional[str] = None, non_interactive: bool = False, noop: bool = False, no_parse: bool = False): +def cmd_profile_init( + ctx, + name: Optional[str] = None, + non_interactive: bool = False, + noop: bool = False, + no_parse: bool = False, +): """Create or edit a profile""" profile = deepcopy(PROFILE_EMPTY) - if name == 'current': + if name == "current": raise Exception("profile name 'current' not allowed") logging.info(CONFIG_MSG) name = name or config.file.profiles.current @@ -374,7 +481,9 @@ def cmd_profile_init(ctx, name: Optional[str] = None, non_interactive: bool = Fa profile |= config.file.profiles[name] if not non_interactive: - profile, _changed = prompt_profile(name, create=True, no_parse=no_parse) + profile, _changed = prompt_profile( + name, create=True, no_parse=no_parse + ) config.update_profile(name, profile) if not noop: @@ -384,4 +493,6 @@ def cmd_profile_init(ctx, name: Optional[str] = None, non_interactive: bool = Fa config.write() else: - logging.info(f'--noop passed, not writing to {config.runtime.config_file}!') + logging.info( + f"--noop passed, not writing to {config.runtime.config_file}!" 
+        )
diff --git a/src/kupferbootstrap/config/profile.py b/src/kupferbootstrap/config/profile.py
index ff0ba95..f58d1b3 100644
--- a/src/kupferbootstrap/config/profile.py
+++ b/src/kupferbootstrap/config/profile.py
@@ -6,15 +6,15 @@ from typing import Optional
 
 from .scheme import Profile, SparseProfile
 
 PROFILE_DEFAULTS_DICT = {
-    'parent': '',
-    'device': '',
-    'flavour': '',
-    'pkgs_include': [],
-    'pkgs_exclude': [],
-    'hostname': 'kupfer',
-    'username': 'kupfer',
-    'password': None,
-    'size_extra_mb': "0",
+    "parent": "",
+    "device": "",
+    "flavour": "",
+    "pkgs_include": [],
+    "pkgs_exclude": [],
+    "hostname": "kupfer",
+    "username": "kupfer",
+    "password": None,
+    "size_extra_mb": "0",
 }
 
 PROFILE_DEFAULTS = Profile.fromDict(PROFILE_DEFAULTS_DICT)
@@ -45,38 +45,51 @@ def resolve_profile(
     resolved = dict[str, Profile]()
     if name in _visited:
         loop = list(_visited)
-        raise Exception(f'Dependency loop detected in profiles: {" -> ".join(loop+[loop[0]])}')
+        raise Exception(
+            f"Dependency loop detected in profiles: {' -> '.join(loop + [loop[0]])}"
+        )
     if name in resolved:
         return resolved
-    logging.debug(f'Resolving profile {name}')
+    logging.debug(f"Resolving profile {name}")
     _visited.append(name)
     sparse = sparse_profiles[name].copy()
     full = deepcopy(sparse)
-    if name != 'default' and 'parent' not in sparse:
-        sparse['parent'] = 'default'
-    if 'parent' in sparse and (parent_name := sparse['parent']):
-        parent = resolve_profile(name=parent_name, sparse_profiles=sparse_profiles, resolved=resolved, _visited=_visited)[parent_name]
+    if name != "default" and "parent" not in sparse:
+        sparse["parent"] = "default"
+    if "parent" in sparse and (parent_name := sparse["parent"]):
+        parent = resolve_profile(
+            name=parent_name,
+            sparse_profiles=sparse_profiles,
+            resolved=resolved,
+            _visited=_visited,
+        )[parent_name]
         full = parent | sparse
 
         # add up size_extra_mb
-        if 'size_extra_mb' in sparse:
-            size = sparse['size_extra_mb']
-            if isinstance(size, str) and size.startswith('+'):
-                full['size_extra_mb'] = int(parent.get('size_extra_mb', 0)) + int(size.lstrip('+'))
+        if "size_extra_mb" in sparse:
+            size = sparse["size_extra_mb"]
+            if isinstance(size, str) and size.startswith("+"):
+                full["size_extra_mb"] = int(
+                    parent.get("size_extra_mb", 0)
+                ) + int(size.lstrip("+"))
             else:
-                full['size_extra_mb'] = int(sparse['size_extra_mb'])
+                full["size_extra_mb"] = int(sparse["size_extra_mb"])
 
         # join our includes with parent's
-        includes = set(parent.get('pkgs_include', []) + sparse.get('pkgs_include', []))
-        if 'pkgs_exclude' in sparse:
-            includes -= set(sparse['pkgs_exclude'])
-        full['pkgs_include'] = list(includes)
+        includes = set(
+            parent.get("pkgs_include", []) + sparse.get("pkgs_include", [])
+        )
+        if "pkgs_exclude" in sparse:
+            includes -= set(sparse["pkgs_exclude"])
+        full["pkgs_include"] = list(includes)
 
         # join our excludes with parent's
-        excludes = set(parent.get('pkgs_exclude', []) + sparse.get('pkgs_exclude', []))
+        excludes = set(
+            parent.get("pkgs_exclude", []) + sparse.get("pkgs_exclude", [])
+        )
 
         # our includes override parent excludes
-        if 'pkgs_include' in sparse:
-            excludes -= set(sparse['pkgs_include'])
-        full['pkgs_exclude'] = list(excludes)
+        if "pkgs_include" in sparse:
+            excludes -= set(sparse["pkgs_include"])
+        full["pkgs_exclude"] = list(excludes)
 
     # now init missing keys
     for key, value in PROFILE_DEFAULTS_DICT.items():
@@ -85,7 +98,7 @@
             if type(value) == list:
                 full[key] = []  # type: ignore[literal-required]
 
-    full['size_extra_mb'] = int(full['size_extra_mb'] or 0)
+
full["size_extra_mb"] = int(full["size_extra_mb"] or 0) resolved[name] = Profile.fromDict(full) return resolved @@ -107,8 +120,10 @@ def resolve_profile_attr( if attr_name in profile: return profile[attr_name], profile_name - if 'parent' not in profile: - raise KeyError(f'Profile attribute {attr_name} not found in {profile_name} and no parents') + if "parent" not in profile: + raise KeyError( + f"Profile attribute {attr_name} not found in {profile_name} and no parents" + ) parent = profile parent_name = profile_name seen = [] @@ -118,11 +133,15 @@ def resolve_profile_attr( seen.append(parent_name) - if not parent.get('parent', None): - raise KeyError(f'Profile attribute {attr_name} not found in inheritance chain, ' - f'we went down to {parent_name}.') - parent_name = parent['parent'] + if not parent.get("parent", None): + raise KeyError( + f"Profile attribute {attr_name} not found in inheritance chain, " + f"we went down to {parent_name}." + ) + parent_name = parent["parent"] if parent_name in seen: - raise RecursionError(f"Profile recursion loop: profile {profile_name} couldn't be resolved" - f"because of a dependency loop:\n{' -> '.join([*seen, parent_name])}") + raise RecursionError( + f"Profile recursion loop: profile {profile_name} couldn't be resolved" + f"because of a dependency loop:\n{' -> '.join([*seen, parent_name])}" + ) parent = profiles_sparse[parent_name] diff --git a/src/kupferbootstrap/config/scheme.py b/src/kupferbootstrap/config/scheme.py index cdaa4ec..8c942c1 100644 --- a/src/kupferbootstrap/config/scheme.py +++ b/src/kupferbootstrap/config/scheme.py @@ -19,7 +19,7 @@ class SparseProfile(DictScheme): size_extra_mb: Optional[Union[str, int]] def __repr__(self): - return f'{type(self)}{dict.__repr__(self.toDict())}' + return f"{type(self)}{dict.__repr__(self.toDict())}" class Profile(SparseProfile): @@ -77,24 +77,36 @@ class ProfilesSection(DictScheme): super().__init__(*kargs, allow_extra=allow_extra, **kwargs) # type: ignore[misc] @classmethod - def transform(cls, values: Mapping[str, Any], validate: bool = True, allow_extra: bool = True, type_hints: Optional[dict[str, Any]] = None): + def transform( + cls, + values: Mapping[str, Any], + validate: bool = True, + allow_extra: bool = True, + type_hints: Optional[dict[str, Any]] = None, + ): results = {} for k, v in values.items(): - if k == 'current': + if k == "current": results[k] = v continue - if not allow_extra and k != 'default': - raise Exception(f'Unknown key {k} in profiles section (Hint: extra_keys not allowed for some reason)') + if not allow_extra and k != "default": + raise Exception( + f"Unknown key {k} in profiles section (Hint: extra_keys not allowed for some reason)" + ) if not isinstance(v, dict): - raise Exception(f'profile {v} is not a dict!') - results[k] = SparseProfile.fromDict(v, validate=True, allow_extra=allow_extra) + raise Exception(f"profile {v} is not a dict!") + results[k] = SparseProfile.fromDict( + v, validate=True, allow_extra=allow_extra + ) return results def update(self, d, validate: bool = True, **kwargs): # type: ignore[override] - Munch.update(self, self.transform(values=d, validate=validate, **kwargs)) + Munch.update( + self, self.transform(values=d, validate=validate, **kwargs) + ) def __repr__(self): - return f'{type(self)}{dict.__repr__(self.toDict())}' + return f"{type(self)}{dict.__repr__(self.toDict())}" class Config(DictScheme): @@ -107,18 +119,20 @@ class Config(DictScheme): @classmethod def fromDict( # type: ignore[override] - cls, - values: Mapping[str, Any], - validate: 
bool = True, - allow_extra: bool = False, - allow_incomplete: bool = False, + cls, + values: Mapping[str, Any], + validate: bool = True, + allow_extra: bool = False, + allow_incomplete: bool = False, ): values = dict(values) # copy for later modification _vals = {} for name, _class in cls._type_hints.items(): if name not in values: if not allow_incomplete: - raise Exception(f'Config key "{name}" not in input dictionary') + raise Exception( + f'Config key "{name}" not in input dictionary' + ) continue value = values.pop(name) if not isinstance(value, _class): @@ -127,7 +141,9 @@ class Config(DictScheme): if values: if validate: - raise Exception(f'values contained unknown keys: {list(values.keys())}') + raise Exception( + f"values contained unknown keys: {list(values.keys())}" + ) _vals |= values return Config(_vals, validate=validate) diff --git a/src/kupferbootstrap/config/state.py b/src/kupferbootstrap/config/state.py index 088e3c7..3574e2d 100644 --- a/src/kupferbootstrap/config/state.py +++ b/src/kupferbootstrap/config/state.py @@ -7,79 +7,97 @@ from typing import Mapping, Optional from kupferbootstrap.constants import DEFAULT_PACKAGE_BRANCH -from .scheme import Config, ConfigLoadState, DictScheme, Profile, RuntimeConfiguration +from .scheme import ( + Config, + ConfigLoadState, + DictScheme, + Profile, + RuntimeConfiguration, +) from .profile import PROFILE_DEFAULTS, PROFILE_DEFAULTS_DICT, resolve_profile -CONFIG_DIR = appdirs.user_config_dir('kupfer') -CACHE_DIR = appdirs.user_cache_dir('kupfer') -CONFIG_DEFAULT_PATH = os.path.join(CONFIG_DIR, 'kupferbootstrap.toml') +CONFIG_DIR = appdirs.user_config_dir("kupfer") +CACHE_DIR = appdirs.user_cache_dir("kupfer") +CONFIG_DEFAULT_PATH = os.path.join(CONFIG_DIR, "kupferbootstrap.toml") CONFIG_DEFAULTS_DICT = { - 'wrapper': { - 'type': 'docker', + "wrapper": { + "type": "docker", }, - 'build': { - 'ccache': True, - 'clean_mode': True, - 'crosscompile': True, - 'crossdirect': True, - 'threads': 0, + "build": { + "ccache": True, + "clean_mode": True, + "crosscompile": True, + "crossdirect": True, + "threads": 0, }, - 'pkgbuilds': { - 'git_repo': 'https://gitlab.com/kupfer/packages/pkgbuilds.git', - 'git_branch': DEFAULT_PACKAGE_BRANCH, + "pkgbuilds": { + "git_repo": "https://gitlab.com/kupfer/packages/pkgbuilds.git", + "git_branch": DEFAULT_PACKAGE_BRANCH, }, - 'pacman': { - 'parallel_downloads': 4, - 'check_space': False, # TODO: investigate why True causes issues - 'repo_branch': DEFAULT_PACKAGE_BRANCH, + "pacman": { + "parallel_downloads": 4, + "check_space": False, # TODO: investigate why True causes issues + "repo_branch": DEFAULT_PACKAGE_BRANCH, }, - 'paths': { - 'cache_dir': CACHE_DIR, - 'chroots': os.path.join('%cache_dir%', 'chroots'), - 'pacman': os.path.join('%cache_dir%', 'pacman'), - 'packages': os.path.join('%cache_dir%', 'packages'), - 'pkgbuilds': os.path.join('%cache_dir%', 'pkgbuilds'), - 'jumpdrive': os.path.join('%cache_dir%', 'jumpdrive'), - 'images': os.path.join('%cache_dir%', 'images'), - 'ccache': os.path.join('%cache_dir%', 'ccache'), - 'rust': os.path.join('%cache_dir%', 'rust'), + "paths": { + "cache_dir": CACHE_DIR, + "chroots": os.path.join("%cache_dir%", "chroots"), + "pacman": os.path.join("%cache_dir%", "pacman"), + "packages": os.path.join("%cache_dir%", "packages"), + "pkgbuilds": os.path.join("%cache_dir%", "pkgbuilds"), + "jumpdrive": os.path.join("%cache_dir%", "jumpdrive"), + "images": os.path.join("%cache_dir%", "images"), + "ccache": os.path.join("%cache_dir%", "ccache"), + "rust": 
os.path.join("%cache_dir%", "rust"), }, - 'profiles': { - 'current': 'default', - 'default': deepcopy(PROFILE_DEFAULTS_DICT), + "profiles": { + "current": "default", + "default": deepcopy(PROFILE_DEFAULTS_DICT), }, } CONFIG_DEFAULTS: Config = Config.fromDict(CONFIG_DEFAULTS_DICT) CONFIG_SECTIONS = list(CONFIG_DEFAULTS.keys()) -CONFIG_RUNTIME_DEFAULTS: RuntimeConfiguration = RuntimeConfiguration.fromDict({ - 'verbose': False, - 'no_wrap': False, - 'error_shell': False, - 'config_file': None, - 'script_source_dir': None, - 'arch': None, - 'uid': None, - 'progress_bars': None, - 'colors': None, -}) +CONFIG_RUNTIME_DEFAULTS: RuntimeConfiguration = RuntimeConfiguration.fromDict( + { + "verbose": False, + "no_wrap": False, + "error_shell": False, + "config_file": None, + "script_source_dir": None, + "arch": None, + "uid": None, + "progress_bars": None, + "colors": None, + } +) def resolve_path_template(path_template: str, paths: dict[str, str]) -> str: - terminator = '%' # i'll be back + terminator = "%" # i'll be back result = path_template for path_name, path in paths.items(): result = result.replace(terminator + path_name + terminator, path) return result -def sanitize_config(conf: dict[str, dict], warn_missing_defaultprofile=True) -> dict[str, dict]: +def sanitize_config( + conf: dict[str, dict], warn_missing_defaultprofile=True +) -> dict[str, dict]: """checks the input config dict for unknown keys and returns only the known parts""" - return merge_configs(conf_new=conf, conf_base={}, warn_missing_defaultprofile=warn_missing_defaultprofile) + return merge_configs( + conf_new=conf, + conf_base={}, + warn_missing_defaultprofile=warn_missing_defaultprofile, + ) -def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defaultprofile=True) -> dict[str, dict]: +def merge_configs( + conf_new: Mapping[str, dict], + conf_base={}, + warn_missing_defaultprofile=True, +) -> dict[str, dict]: """ Returns `conf_new` semantically merged into `conf_base`, after validating `conf_new` keys against `CONFIG_DEFAULTS` and `PROFILE_DEFAULTS`. @@ -105,17 +123,24 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau # profiles need special handling: # 1. profile names are unknown keys by definition, but we want 'default' to exist # 2. 
A profile's subkeys must be compared against PROFILE_DEFAULTS.keys() - if outer_name == 'profiles': - if warn_missing_defaultprofile and 'default' not in outer_conf.keys(): - logging.warning('Default profile is not defined in config file') + if outer_name == "profiles": + if ( + warn_missing_defaultprofile + and "default" not in outer_conf.keys() + ): + logging.warning( + "Default profile is not defined in config file" + ) update = dict[str, dict]() for profile_name, profile_conf in outer_conf.items(): if not isinstance(profile_conf, (dict, Profile)): - if profile_name == 'current': + if profile_name == "current": parsed[outer_name][profile_name] = profile_conf else: - logging.warning(f'Skipped key "{profile_name}" in profile section: only subsections and "current" allowed') + logging.warning( + f'Skipped key "{profile_name}" in profile section: only subsections and "current" allowed' + ) continue # init profile @@ -126,7 +151,9 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau for key, val in profile_conf.items(): if key not in PROFILE_DEFAULTS: - logging.warning(f'Skipped unknown config item "{key}" in profile "{profile_name}"') + logging.warning( + f'Skipped unknown config item "{key}" in profile "{profile_name}"' + ) continue profile[key] = val update |= {profile_name: profile} @@ -136,7 +163,9 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau # handle generic inner config dict for inner_name, inner_conf in outer_conf.items(): if inner_name not in CONFIG_DEFAULTS[outer_name].keys(): - logging.warning(f'Skipped unknown config item "{inner_name}" in section "{outer_name}"') + logging.warning( + f'Skipped unknown config item "{inner_name}" in section "{outer_name}"' + ) continue parsed[outer_name][inner_name] = inner_conf @@ -148,7 +177,6 @@ def dump_toml(conf) -> str: def dump_file(file_path: str, config: dict, file_mode: int = 0o600): - def _opener(path, flags): return os.open(path, flags, file_mode) @@ -156,7 +184,7 @@ def dump_file(file_path: str, config: dict, file_mode: int = 0o600): if not os.path.exists(conf_dir): os.makedirs(conf_dir) old_umask = os.umask(0) - with open(file_path, 'w', opener=_opener) as f: + with open(file_path, "w", opener=_opener) as f: f.write(dump_toml(conf=config)) os.umask(old_umask) @@ -167,8 +195,10 @@ def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict: The parsed results are semantically merged into `base` before returning. `base` itself is NOT checked for invalid keys. 
""" - _conf_file = config_file if config_file is not None else CONFIG_DEFAULT_PATH - logging.debug(f'Trying to load config file: {_conf_file}') + _conf_file = ( + config_file if config_file is not None else CONFIG_DEFAULT_PATH + ) + logging.debug(f"Trying to load config file: {_conf_file}") loaded_conf = toml.load(_conf_file) return merge_configs(conf_new=loaded_conf, conf_base=base) @@ -176,14 +206,16 @@ def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict: class ConfigLoadException(Exception): inner = None - def __init__(self, extra_msg='', inner_exception: Optional[Exception] = None): - msg: list[str] = ['Config load failed!'] + def __init__( + self, extra_msg="", inner_exception: Optional[Exception] = None + ): + msg: list[str] = ["Config load failed!"] if extra_msg: msg.append(extra_msg) if inner_exception: self.inner = inner_exception msg.append(str(inner_exception)) - super().__init__(self, ' '.join(msg)) + super().__init__(self, " ".join(msg)) class ConfigStateHolder: @@ -194,13 +226,24 @@ class ConfigStateHolder: file_state: ConfigLoadState _profile_cache: Optional[dict[str, Profile]] - def __init__(self, file_conf_path: Optional[str] = None, runtime_conf={}, file_conf_base: dict = {}): + def __init__( + self, + file_conf_path: Optional[str] = None, + runtime_conf={}, + file_conf_base: dict = {}, + ): """init a stateholder, optionally loading `file_conf_path`""" - self.file = Config.fromDict(merge_configs(conf_new=file_conf_base, conf_base=CONFIG_DEFAULTS)) + self.file = Config.fromDict( + merge_configs(conf_new=file_conf_base, conf_base=CONFIG_DEFAULTS) + ) self.file_state = ConfigLoadState() - self.runtime = RuntimeConfiguration.fromDict(CONFIG_RUNTIME_DEFAULTS | runtime_conf) + self.runtime = RuntimeConfiguration.fromDict( + CONFIG_RUNTIME_DEFAULTS | runtime_conf + ) self.runtime.arch = os.uname().machine - self.runtime.script_source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + self.runtime.script_source_dir = os.path.dirname( + os.path.dirname(os.path.realpath(__file__)) + ) self.runtime.uid = os.getuid() self._profile_cache = {} if file_conf_path: @@ -211,7 +254,9 @@ class ConfigStateHolder: self.runtime.config_file = config_file self._profile_cache = None try: - self.file = Config.fromDict(parse_file(config_file=config_file, base=base), validate=True) + self.file = Config.fromDict( + parse_file(config_file=config_file, base=base), validate=True + ) self.file_state.exception = None except Exception as ex: self.file_state.exception = ex @@ -219,7 +264,9 @@ class ConfigStateHolder: def is_loaded(self) -> bool: "returns True if a file was **sucessfully** loaded" - return self.file_state.load_finished and self.file_state.exception is None + return ( + self.file_state.load_finished and self.file_state.exception is None + ) def enforce_config_loaded(self): if not self.file_state.load_finished: @@ -228,51 +275,76 @@ class ConfigStateHolder: ex = self.file_state.exception if ex: if type(ex) == FileNotFoundError: - ex = Exception("Config file doesn't exist. Try running `kupferbootstrap config init` first?") + ex = Exception( + "Config file doesn't exist. Try running `kupferbootstrap config init` first?" 
+                )
         raise ex
 
     def get_profile(self, name: Optional[str] = None) -> Profile:
         name = name or self.file.profiles.current
-        self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file.profiles, resolved=self._profile_cache)
+        self._profile_cache = resolve_profile(
+            name=name,
+            sparse_profiles=self.file.profiles,
+            resolved=self._profile_cache,
+        )
        return self._profile_cache[name]
 
-    def _enforce_profile_field(self, field: str, profile_name: Optional[str] = None, hint_or_set_arch: bool = False) -> Profile:
+    def _enforce_profile_field(
+        self,
+        field: str,
+        profile_name: Optional[str] = None,
+        hint_or_set_arch: bool = False,
+    ) -> Profile:
         # TODO: device
-        profile_name = profile_name if profile_name is not None else self.file.profiles.current
-        arch_hint = ''
+        profile_name = (
+            profile_name
+            if profile_name is not None
+            else self.file.profiles.current
+        )
+        arch_hint = ""
         if not hint_or_set_arch:
             self.enforce_config_loaded()
         else:
-            arch_hint = (' or specifiy the target architecture by passing `--arch` to the current command,\n'
-                         'e.g. `kupferbootstrap packages build --arch x86_64`')
+            arch_hint = (
+                " or specify the target architecture by passing `--arch` to the current command,\n"
+                "e.g. `kupferbootstrap packages build --arch x86_64`"
+            )
         if not self.is_loaded():
             if not self.file_state.exception:
-                raise Exception(f'Error enforcing config profile {field}: config hadn\'t even been loaded yet.\n'
-                                'This is a bug in kupferbootstrap!')
+                raise Exception(
+                    f"Error enforcing config profile {field}: config hadn't even been loaded yet.\n"
+                    "This is a bug in kupferbootstrap!"
+                )
-            raise Exception(f"Profile {field} couldn't be resolved because the config file couldn't be loaded.\n"
-                            "If the config doesn't exist, try running `kupferbootstrap config init`.\n"
-                            f"Error: {self.file_state.exception}")
+            raise Exception(
+                f"Profile {field} couldn't be resolved because the config file couldn't be loaded.\n"
+                "If the config doesn't exist, try running `kupferbootstrap config init`.\n"
+                f"Error: {self.file_state.exception}"
+            )
         if profile_name and profile_name not in self.file.profiles:
-            raise Exception(f'Unknown profile "{profile_name}". Please run `kupferbootstrap config profile init`{arch_hint}')
+            raise Exception(
+                f'Unknown profile "{profile_name}".
Please run `kupferbootstrap config profile init`{arch_hint}' + ) profile = self.get_profile(profile_name) if field not in profile or not profile[field]: - m = (f'Profile "{profile_name}" has no {field.upper()} configured.\n' - f'Please run `kupferbootstrap config profile init {profile_name}`{arch_hint}') + m = ( + f'Profile "{profile_name}" has no {field.upper()} configured.\n' + f"Please run `kupferbootstrap config profile init {profile_name}`{arch_hint}" + ) raise Exception(m) return profile def enforce_profile_device_set(self, **kwargs) -> Profile: - return self._enforce_profile_field(field='device', **kwargs) + return self._enforce_profile_field(field="device", **kwargs) def enforce_profile_flavour_set(self, **kwargs) -> Profile: - return self._enforce_profile_field(field='flavour', **kwargs) + return self._enforce_profile_field(field="flavour", **kwargs) def get_path(self, path_name: str) -> str: paths = self.file.paths return resolve_path_template(paths[path_name], paths) def get_package_dir(self, arch: str): - return os.path.join(self.get_path('packages'), arch) + return os.path.join(self.get_path("packages"), arch) def dump(self) -> str: """dump toml representation of `self.file`""" @@ -286,32 +358,53 @@ class ConfigStateHolder: os.makedirs(os.path.dirname(path), exist_ok=True) new = not os.path.exists(path) dump_file(path, self.file) - logging.info(f'{"Created" if new else "Written changes to"} config file at {path}') + logging.info( + f"{'Created' if new else 'Written changes to'} config file at {path}" + ) def invalidate_profile_cache(self): """Clear the profile cache (usually after modification)""" self._profile_cache = None - def update(self, config_fragment: dict[str, dict], warn_missing_defaultprofile: bool = True) -> bool: + def update( + self, + config_fragment: dict[str, dict], + warn_missing_defaultprofile: bool = True, + ) -> bool: """Update `self.file` with `config_fragment`. 
Returns `True` if the config was changed""" - merged = merge_configs(config_fragment, conf_base=self.file, warn_missing_defaultprofile=warn_missing_defaultprofile) + merged = merge_configs( + config_fragment, + conf_base=self.file, + warn_missing_defaultprofile=warn_missing_defaultprofile, + ) changed = self.file.toDict() != merged self.file.update(merged) - if changed and 'profiles' in config_fragment and self.file.profiles.toDict() != config_fragment['profiles']: + if ( + changed + and "profiles" in config_fragment + and self.file.profiles.toDict() != config_fragment["profiles"] + ): self.invalidate_profile_cache() return changed - def update_profile(self, name: str, profile: Profile, merge: bool = False, create: bool = True, prune: bool = True): + def update_profile( + self, + name: str, + profile: Profile, + merge: bool = False, + create: bool = True, + prune: bool = True, + ): new = {} if name not in self.file.profiles: if not create: - raise Exception(f'Unknown profile: {name}') + raise Exception(f"Unknown profile: {name}") else: if merge: new = deepcopy(self.file.profiles[name]) - logging.debug(f'new: {new}') - logging.debug(f'profile: {profile}') + logging.debug(f"new: {new}") + logging.debug(f"profile: {profile}") new |= profile if prune: diff --git a/src/kupferbootstrap/constants.py b/src/kupferbootstrap/constants.py index 027ccb7..3b711b4 100644 --- a/src/kupferbootstrap/constants.py +++ b/src/kupferbootstrap/constants.py @@ -1,173 +1,173 @@ from .typehelpers import TypeAlias -FASTBOOT = 'fastboot' +FASTBOOT = "fastboot" FLASH_PARTS = { - 'FULL': 'full', - 'ABOOT': 'abootimg', - 'LK2ND': 'lk2nd', - 'QHYPSTUB': 'qhypstub', + "FULL": "full", + "ABOOT": "abootimg", + "LK2ND": "lk2nd", + "QHYPSTUB": "qhypstub", } -EMMC = 'emmc' -MICROSD = 'microsd' +EMMC = "emmc" +MICROSD = "microsd" LOCATIONS = [EMMC, MICROSD] -JUMPDRIVE = 'jumpdrive' -JUMPDRIVE_VERSION = '0.8' +JUMPDRIVE = "jumpdrive" +JUMPDRIVE_VERSION = "0.8" BASE_LOCAL_PACKAGES: list[str] = [ - 'base-kupfer', + "base-kupfer", ] BASE_PACKAGES: list[str] = BASE_LOCAL_PACKAGES + [ - 'base', - 'nano', - 'vim', + "base", + "nano", + "vim", ] POST_INSTALL_CMDS = [ - 'kupfer-config apply', - 'kupfer-config --user apply', + "kupfer-config apply", + "kupfer-config --user apply", ] REPOS_CONFIG_FILE = "repos.yml" REPOS_CONFIG_FILE_USER = "repos.local.yml" REPOSITORIES = [ - 'boot', - 'cross', - 'device', - 'firmware', - 'linux', - 'main', - 'phosh', + "boot", + "cross", + "device", + "firmware", + "linux", + "main", + "phosh", ] -DEFAULT_PACKAGE_BRANCH = 'dev' -KUPFER_BRANCH_MARKER = '%kupfer_branch%' -KUPFER_HTTPS_BASE = f'https://gitlab.com/kupfer/packages/prebuilts/-/raw/{KUPFER_BRANCH_MARKER}' -KUPFER_HTTPS = KUPFER_HTTPS_BASE + '/$arch/$repo' +DEFAULT_PACKAGE_BRANCH = "dev" +KUPFER_BRANCH_MARKER = "%kupfer_branch%" +KUPFER_HTTPS_BASE = f"https://gitlab.com/kupfer/packages/prebuilts/-/raw/{KUPFER_BRANCH_MARKER}" +KUPFER_HTTPS = KUPFER_HTTPS_BASE + "/$arch/$repo" Arch: TypeAlias = str ARCHES = [ - 'x86_64', - 'aarch64', - 'armv7h', + "x86_64", + "aarch64", + "armv7h", ] DistroArch: TypeAlias = Arch TargetArch: TypeAlias = Arch ALARM_REPOS = { - 'core': 'http://mirror.archlinuxarm.org/$arch/$repo', - 'extra': 'http://mirror.archlinuxarm.org/$arch/$repo', - 'alarm': 'http://mirror.archlinuxarm.org/$arch/$repo', - 'aur': 'http://mirror.archlinuxarm.org/$arch/$repo', + "core": "http://mirror.archlinuxarm.org/$arch/$repo", + "extra": "http://mirror.archlinuxarm.org/$arch/$repo", + "alarm": "http://mirror.archlinuxarm.org/$arch/$repo", + 
"aur": "http://mirror.archlinuxarm.org/$arch/$repo", } BASE_DISTROS: dict[DistroArch, dict[str, dict[str, str]]] = { - 'x86_64': { - 'repos': { - 'core': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch', - 'extra': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch', + "x86_64": { + "repos": { + "core": "https://geo.mirror.pkgbuild.com/$repo/os/$arch", + "extra": "https://geo.mirror.pkgbuild.com/$repo/os/$arch", }, }, - 'aarch64': { - 'repos': ALARM_REPOS, + "aarch64": { + "repos": ALARM_REPOS, }, - 'armv7h': { - 'repos': ALARM_REPOS, + "armv7h": { + "repos": ALARM_REPOS, }, } COMPILE_ARCHES: dict[Arch, str] = { - 'x86_64': 'amd64', - 'aarch64': 'arm64', - 'armv7h': 'arm', + "x86_64": "amd64", + "aarch64": "arm64", + "armv7h": "arm", } GCC_HOSTSPECS: dict[DistroArch, dict[TargetArch, str]] = { - 'x86_64': { - 'x86_64': 'x86_64-pc-linux-gnu', - 'aarch64': 'aarch64-unknown-linux-gnu', - 'armv7h': 'arm-unknown-linux-gnueabihf' + "x86_64": { + "x86_64": "x86_64-pc-linux-gnu", + "aarch64": "aarch64-unknown-linux-gnu", + "armv7h": "arm-unknown-linux-gnueabihf", }, - 'aarch64': { - 'aarch64': 'aarch64-unknown-linux-gnu', - }, - 'armv7h': { - 'armv7h': 'armv7l-unknown-linux-gnueabihf' + "aarch64": { + "aarch64": "aarch64-unknown-linux-gnu", }, + "armv7h": {"armv7h": "armv7l-unknown-linux-gnueabihf"}, } -CFLAGS_GENERAL = ['-O2', '-pipe', '-fstack-protector-strong'] +CFLAGS_GENERAL = ["-O2", "-pipe", "-fstack-protector-strong"] CFLAGS_ALARM = [ - ' -fno-plt', - '-fexceptions', - '-Wp,-D_FORTIFY_SOURCE=2', - '-Wformat', - '-Werror=format-security', - '-fstack-clash-protection', + " -fno-plt", + "-fexceptions", + "-Wp,-D_FORTIFY_SOURCE=2", + "-Wformat", + "-Werror=format-security", + "-fstack-clash-protection", ] CFLAGS_ARCHES: dict[Arch, list[str]] = { - 'x86_64': ['-march=x86-64', '-mtune=generic'], - 'aarch64': [ - '-march=armv8-a', - ] + CFLAGS_ALARM, - 'armv7h': [ - '-march=armv7-a', - '-mfloat-abi=hard', - '-mfpu=neon', - ] + CFLAGS_ALARM, + "x86_64": ["-march=x86-64", "-mtune=generic"], + "aarch64": [ + "-march=armv8-a", + ] + + CFLAGS_ALARM, + "armv7h": [ + "-march=armv7-a", + "-mfloat-abi=hard", + "-mfpu=neon", + ] + + CFLAGS_ALARM, } QEMU_ARCHES: dict[Arch, str] = { - 'x86_64': 'x86_64', - 'aarch64': 'aarch64', - 'armv7h': 'arm', + "x86_64": "x86_64", + "aarch64": "aarch64", + "armv7h": "arm", } -QEMU_BINFMT_PKGS = ['qemu-user-static-bin', 'binfmt-qemu-static'] -CROSSDIRECT_PKGS = ['crossdirect'] + QEMU_BINFMT_PKGS +QEMU_BINFMT_PKGS = ["qemu-user-static-bin", "binfmt-qemu-static"] +CROSSDIRECT_PKGS = ["crossdirect"] + QEMU_BINFMT_PKGS -SSH_DEFAULT_HOST = '172.16.42.1' +SSH_DEFAULT_HOST = "172.16.42.1" SSH_DEFAULT_PORT = 22 SSH_COMMON_OPTIONS = [ - '-o', - 'GlobalKnownHostsFile=/dev/null', - '-o', - 'UserKnownHostsFile=/dev/null', - '-o', - 'StrictHostKeyChecking=no', + "-o", + "GlobalKnownHostsFile=/dev/null", + "-o", + "UserKnownHostsFile=/dev/null", + "-o", + "StrictHostKeyChecking=no", ] CHROOT_PATHS = { - 'chroots': '/chroots', - 'jumpdrive': '/var/cache/jumpdrive', - 'pacman': '/pacman', - 'packages': '/packages', - 'pkgbuilds': '/pkgbuilds', - 'images': '/images', + "chroots": "/chroots", + "jumpdrive": "/var/cache/jumpdrive", + "pacman": "/pacman", + "packages": "/packages", + "pkgbuilds": "/pkgbuilds", + "images": "/images", } WRAPPER_TYPES = [ - 'none', - 'docker', + "none", + "docker", ] -WRAPPER_ENV_VAR = 'KUPFERBOOTSTRAP_WRAPPED' +WRAPPER_ENV_VAR = "KUPFERBOOTSTRAP_WRAPPED" MAKEPKG_CMD = [ - 'makepkg', - '--noconfirm', - '--ignorearch', - '--needed', + "makepkg", + "--noconfirm", + 
"--ignorearch", + "--needed", ] -SRCINFO_FILE = '.SRCINFO' -SRCINFO_METADATA_FILE = '.srcinfo_meta.json' +SRCINFO_FILE = ".SRCINFO" +SRCINFO_METADATA_FILE = ".srcinfo_meta.json" SRCINFO_INITIALISED_FILE = ".srcinfo_initialised.json" SRCINFO_TARBALL_FILE = "srcinfos.tar.gz" -SRCINFO_TARBALL_URL = f'{KUPFER_HTTPS_BASE}/{SRCINFO_TARBALL_FILE}' +SRCINFO_TARBALL_URL = f"{KUPFER_HTTPS_BASE}/{SRCINFO_TARBALL_FILE}" -FLAVOUR_INFO_FILE = 'flavourinfo.json' -FLAVOUR_DESCRIPTION_PREFIX = 'kupfer flavour:' +FLAVOUR_INFO_FILE = "flavourinfo.json" +FLAVOUR_DESCRIPTION_PREFIX = "kupfer flavour:" diff --git a/src/kupferbootstrap/devices/cli.py b/src/kupferbootstrap/devices/cli.py index ca7501a..a1ea91a 100644 --- a/src/kupferbootstrap/devices/cli.py +++ b/src/kupferbootstrap/devices/cli.py @@ -12,28 +12,34 @@ from kupferbootstrap.version.cli import _check_kbs_version from .device import get_devices, get_device -@click.command(name='devices') -@click.option('-j', '--json', is_flag=True, help='output machine-parsable JSON format') +@click.command(name="devices") @click.option( - '--force-parse-deviceinfo/--no-parse-deviceinfo', + "-j", "--json", is_flag=True, help="output machine-parsable JSON format" +) +@click.option( + "--force-parse-deviceinfo/--no-parse-deviceinfo", is_flag=True, default=None, help="Force or disable deviceinfo parsing. The default is to try but continue if it fails.", ) @click.option( - '--download-packages/--no-download-packages', + "--download-packages/--no-download-packages", is_flag=True, default=False, - help='Download packages while trying to parse deviceinfo', + help="Download packages while trying to parse deviceinfo", +) +@click.option( + "--output-file", + type=click.Path(exists=False, file_okay=True), + help="Dump JSON to file", ) -@click.option('--output-file', type=click.Path(exists=False, file_okay=True), help="Dump JSON to file") def cmd_devices( json: bool = False, force_parse_deviceinfo: Optional[bool] = True, download_packages: bool = False, output_file: Optional[str] = None, ): - 'list the available devices and descriptions' + "list the available devices and descriptions" _check_kbs_version(init_pkgbuilds=False) devices = get_devices() if not devices: @@ -42,17 +48,23 @@ def cmd_devices( profile_name = config.file.profiles.current selected, inherited_from = None, None try: - selected, inherited_from = resolve_profile_field(None, profile_name, 'device', config.file.profiles) + selected, inherited_from = resolve_profile_field( + None, profile_name, "device", config.file.profiles + ) if selected: profile_device = get_device(selected) except Exception as ex: - logging.debug(f"Failed to get profile device for marking as currently selected, continuing anyway. Exception: {ex}") - output = [''] + logging.debug( + f"Failed to get profile device for marking as currently selected, continuing anyway. 
Exception: {ex}" + ) + output = [""] json_output = {} interactive_json = json and not output_file if output_file: json = True - use_colors = colors_supported(False if interactive_json else config.runtime.colors) + use_colors = colors_supported( + False if interactive_json else config.runtime.colors + ) for name in sorted(devices.keys()): device = devices[name] assert device @@ -61,7 +73,9 @@ def cmd_devices( device.parse_deviceinfo(try_download=download_packages) except Exception as ex: if not force_parse_deviceinfo: - logging.debug(f"Failed to parse deviceinfo for extended description, not a problem: {ex}") + logging.debug( + f"Failed to parse deviceinfo for extended description, not a problem: {ex}" + ) else: raise ex @@ -71,12 +85,14 @@ def cmd_devices( continue snippet = device.nice_str(colors=use_colors, newlines=True) if profile_device and profile_device.name == device.name: - snippet = color_mark_selected(snippet, profile_name or '[unknown]', inherited_from) + snippet = color_mark_selected( + snippet, profile_name or "[unknown]", inherited_from + ) output.append(f"{snippet}\n") if interactive_json: - output = ['\n' + json_dump(json_output, indent=4)] + output = ["\n" + json_dump(json_output, indent=4)] if output_file: - with open(output_file, 'w') as fd: + with open(output_file, "w") as fd: fd.write(json_dump(json_output)) for line in output: print(line) diff --git a/src/kupferbootstrap/devices/device.py b/src/kupferbootstrap/devices/device.py index 7b8610e..cbed024 100644 --- a/src/kupferbootstrap/devices/device.py +++ b/src/kupferbootstrap/devices/device.py @@ -8,7 +8,13 @@ from kupferbootstrap.constants import Arch, ARCHES from kupferbootstrap.dictscheme import DictScheme from kupferbootstrap.distro.distro import get_kupfer_local from kupferbootstrap.distro.package import LocalPackage -from kupferbootstrap.packages.pkgbuild import Pkgbuild, _pkgbuilds_cache, discover_pkgbuilds, get_pkgbuild_by_path, init_pkgbuilds +from kupferbootstrap.packages.pkgbuild import ( + Pkgbuild, + _pkgbuilds_cache, + discover_pkgbuilds, + get_pkgbuild_by_path, + init_pkgbuilds, +) from kupferbootstrap.utils import read_files_from_tar, color_str from .deviceinfo import DEFAULT_IMAGE_SECTOR_SIZE, DeviceInfo, parse_deviceinfo @@ -30,17 +36,26 @@ class DeviceSummary(DictScheme): package_path: Optional[str] def nice_str(self, newlines: bool = False, colors: bool = False) -> str: - separator = '\n' if newlines else ', ' + separator = "\n" if newlines else ", " assert bool(self.package_path) == bool(self.package_name) - package_path = {"Package Path": self.package_path} if self.package_path else {} + package_path = ( + {"Package Path": self.package_path} if self.package_path else {} + ) fields = { "Device": self.name, - "Description": self.description or f"[no package {'description' if self.package_name else 'associated (?!)'} and deviceinfo not parsed]", + "Description": self.description + or f"[no package {'description' if self.package_name else 'associated (?!)'} and deviceinfo not parsed]", "Architecture": self.arch, - "Package Name": self.package_name or "no package associated. PROBABLY A BUG!", + "Package Name": self.package_name + or "no package associated. 
PROBABLY A BUG!", **package_path, } - return separator.join([f"{color_str(name, bold=True, use_colors=colors)}: {value}" for name, value in fields.items()]) + return separator.join( + [ + f"{color_str(name, bold=True, use_colors=colors)}: {value}" + for name, value in fields.items() + ] + ) class Device(DictScheme): @@ -50,7 +65,7 @@ class Device(DictScheme): deviceinfo: Optional[DeviceInfo] def __repr__(self): - return f'Device<{self.name},{self.arch},{self.package.path if self.package else "[no package]"}>' + return f"Device<{self.name},{self.arch},{self.package.path if self.package else '[no package]'}>" def __str__(self): return self.nice_str(newlines=True) @@ -60,8 +75,14 @@ class Device(DictScheme): def get_summary(self) -> DeviceSummary: result: dict[str, Optional[str]] = {} - description = ((self.package.description if self.package else "").strip() or - (self.deviceinfo.get("name", "[No name in deviceinfo]") if self.deviceinfo else "")).strip() + description = ( + (self.package.description if self.package else "").strip() + or ( + self.deviceinfo.get("name", "[No name in deviceinfo]") + if self.deviceinfo + else "" + ) + ).strip() result["name"] = self.name result["description"] = description result["arch"] = self.arch @@ -69,24 +90,39 @@ class Device(DictScheme): result["package_path"] = self.package.path if self.package else None return DeviceSummary(result) - def parse_deviceinfo(self, try_download: bool = True, lazy: bool = True) -> DeviceInfo: - if not lazy or 'deviceinfo' not in self or self.deviceinfo is None: + def parse_deviceinfo( + self, try_download: bool = True, lazy: bool = True + ) -> DeviceInfo: + if not lazy or "deviceinfo" not in self or self.deviceinfo is None: # avoid import loop - from kupferbootstrap.packages.build import check_package_version_built - is_built = check_package_version_built(self.package, self.arch, try_download=try_download) + from kupferbootstrap.packages.build import ( + check_package_version_built, + ) + + is_built = check_package_version_built( + self.package, self.arch, try_download=try_download + ) if not is_built: - raise Exception(f"device package {self.package.name} for device {self.name} couldn't be acquired!") - pkgs: dict[str, LocalPackage] = get_kupfer_local(arch=self.arch, in_chroot=False, scan=True).get_packages() + raise Exception( + f"device package {self.package.name} for device {self.name} couldn't be acquired!" 
+ ) + pkgs: dict[str, LocalPackage] = get_kupfer_local( + arch=self.arch, in_chroot=False, scan=True + ).get_packages() if self.package.name not in pkgs: - raise Exception(f"device package {self.package.name} somehow not in repos, this is a kupferbootstrap bug") + raise Exception( + f"device package {self.package.name} somehow not in repos, this is a kupferbootstrap bug" + ) pkg = pkgs[self.package.name] file_path = pkg.acquire() assert file_path assert os.path.exists(file_path) - deviceinfo_path = 'etc/kupfer/deviceinfo' + deviceinfo_path = "etc/kupfer/deviceinfo" for path, f in read_files_from_tar(file_path, [deviceinfo_path]): if path != deviceinfo_path: - raise Exception(f'Somehow, we got a wrong file: expected: "{deviceinfo_path}", got: "{path}"') + raise Exception( + f'Somehow, we got a wrong file: expected: "{deviceinfo_path}", got: "{path}"' + ) with f as fd: lines = fd.readlines() assert lines @@ -95,13 +131,15 @@ class Device(DictScheme): info = parse_deviceinfo(lines, self.name) assert info.arch assert info.arch == self.arch - self['deviceinfo'] = info + self["deviceinfo"] = info assert self.deviceinfo return self.deviceinfo def get_image_sectorsize(self, **kwargs) -> Optional[int]: """Gets the deviceinfo_rootfs_image_sector_size if defined, otherwise None""" - return self.parse_deviceinfo(**kwargs).get('rootfs_image_sector_size', None) + return self.parse_deviceinfo(**kwargs).get( + "rootfs_image_sector_size", None + ) def get_image_sectorsize_default(self, **kwargs) -> int: return self.get_image_sectorsize(**kwargs) or DEFAULT_IMAGE_SECTOR_SIZE @@ -109,29 +147,39 @@ class Device(DictScheme): def check_devicepkg_name(name: str, log_level: Optional[int] = None): valid = True - if not name.startswith('device-'): + if not name.startswith("device-"): valid = False if log_level is not None: - logging.log(log_level, f'invalid device package name "{name}": doesn\'t start with "device-"') - if name.endswith('-common'): + logging.log( + log_level, + f'invalid device package name "{name}": doesn\'t start with "device-"', + ) + if name.endswith("-common"): valid = False if log_level is not None: - logging.log(log_level, f'invalid device package name "{name}": ends with "-common"') + logging.log( + log_level, + f'invalid device package name "{name}": ends with "-common"', + ) return valid def parse_device_pkg(pkgbuild: Pkgbuild) -> Device: if len(pkgbuild.arches) != 1: - raise Exception(f"{pkgbuild.name}: Device package must have exactly one arch, but has {pkgbuild.arches}") + raise Exception( + f"{pkgbuild.name}: Device package must have exactly one arch, but has {pkgbuild.arches}" + ) arch = pkgbuild.arches[0] - if arch == 'any' or arch not in ARCHES: - raise Exception(f'unknown arch for device package: {arch}') - if pkgbuild.repo != 'device': - logging.warning(f'device package {pkgbuild.name} is in unexpected repo "{pkgbuild.repo}", expected "device"') + if arch == "any" or arch not in ARCHES: + raise Exception(f"unknown arch for device package: {arch}") + if pkgbuild.repo != "device": + logging.warning( + f'device package {pkgbuild.name} is in unexpected repo "{pkgbuild.repo}", expected "device"' + ) name = pkgbuild.name - prefix = 'device-' + prefix = "device-" if name.startswith(prefix): - name = name[len(prefix):] + name = name[len(prefix) :] return Device(name=name, arch=arch, package=pkgbuild, deviceinfo=None) @@ -141,8 +189,10 @@ def sanitize_device_name(name: str, warn: bool = True) -> str: warning = f"Deprecated device {name}" replacement = DEVICE_DEPRECATIONS[name] if 
replacement: - warning += (f': Device has been renamed to {replacement}! Please adjust your profile config!\n' - 'This will become an error in a future version!') + warning += ( + f": Device has been renamed to {replacement}! Please adjust your profile config!\n" + "This will become an error in a future version!" + ) name = replacement if warn: logging.warning(warning) @@ -153,16 +203,21 @@ _device_cache: dict[str, Device] = {} _device_cache_populated: bool = False -def get_devices(pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True) -> dict[str, Device]: +def get_devices( + pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True +) -> dict[str, Device]: global _device_cache, _device_cache_populated use_cache = _device_cache_populated and lazy if not use_cache: logging.info("Searching PKGBUILDs for device packages") if not pkgbuilds: - pkgbuilds = discover_pkgbuilds(lazy=lazy, repositories=['device']) + pkgbuilds = discover_pkgbuilds(lazy=lazy, repositories=["device"]) _device_cache.clear() for pkgbuild in pkgbuilds.values(): - if not (pkgbuild.repo == 'device' and check_devicepkg_name(pkgbuild.name, log_level=None)): + if not ( + pkgbuild.repo == "device" + and check_devicepkg_name(pkgbuild.name, log_level=None) + ): continue dev = parse_device_pkg(pkgbuild) _device_cache[dev.name] = dev @@ -170,7 +225,12 @@ def get_devices(pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = Tr return _device_cache.copy() -def get_device(name: str, pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, scan_all=False) -> Device: +def get_device( + name: str, + pkgbuilds: Optional[dict[str, Pkgbuild]] = None, + lazy: bool = True, + scan_all=False, +) -> Device: global _device_cache, _device_cache_populated assert lazy or pkgbuilds name = sanitize_device_name(name) @@ -179,31 +239,46 @@ def get_device(name: str, pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: if scan_all: devices = get_devices(pkgbuilds=pkgbuilds, lazy=lazy) if name not in devices: - raise Exception(f'Unknown device {name}!\n' - f'Available: {list(devices.keys())}') + raise Exception( + f"Unknown device {name}!\nAvailable: {list(devices.keys())}" + ) return devices[name] else: - pkgname = f'device-{name}' + pkgname = f"device-{name}" if pkgbuilds: if pkgname not in pkgbuilds: - raise Exception(f'Unknown device {name}!') + raise Exception(f"Unknown device {name}!") pkgbuild = pkgbuilds[pkgname] else: if lazy and pkgname in _pkgbuilds_cache: pkgbuild = _pkgbuilds_cache[pkgname] else: init_pkgbuilds() - relative_path = os.path.join('device', pkgname) - if not os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_path)): - logging.debug(f'Exact device pkgbuild path "pkgbuilds/{relative_path}" doesn\'t exist, scanning entire repo') - return get_device(name, pkgbuilds=pkgbuilds, lazy=lazy, scan_all=True) - pkgbuild = [p for p in get_pkgbuild_by_path(relative_path, lazy=lazy) if p.name == pkgname][0] + relative_path = os.path.join("device", pkgname) + if not os.path.exists( + os.path.join(config.get_path("pkgbuilds"), relative_path) + ): + logging.debug( + f'Exact device pkgbuild path "pkgbuilds/{relative_path}" doesn\'t exist, scanning entire repo' + ) + return get_device( + name, pkgbuilds=pkgbuilds, lazy=lazy, scan_all=True + ) + pkgbuild = [ + p + for p in get_pkgbuild_by_path(relative_path, lazy=lazy) + if p.name == pkgname + ][0] device = parse_device_pkg(pkgbuild) if lazy: _device_cache[name] = device return device -def get_profile_device(profile_name: Optional[str] = 
None, hint_or_set_arch: bool = False):
-    profile = config.enforce_profile_device_set(profile_name=profile_name, hint_or_set_arch=hint_or_set_arch)
+def get_profile_device(
+    profile_name: Optional[str] = None, hint_or_set_arch: bool = False
+):
+    profile = config.enforce_profile_device_set(
+        profile_name=profile_name, hint_or_set_arch=hint_or_set_arch
+    )
     return get_device(profile.device)
diff --git a/src/kupferbootstrap/devices/deviceinfo.py b/src/kupferbootstrap/devices/deviceinfo.py
index 17ed6a2..0b281c6 100644
--- a/src/kupferbootstrap/devices/deviceinfo.py
+++ b/src/kupferbootstrap/devices/deviceinfo.py
@@ -12,7 +12,7 @@ from kupferbootstrap.constants import Arch
 from kupferbootstrap.dictscheme import DictScheme

 PMOS_ARCHES_OVERRIDES: dict[str, Arch] = {
-    "armv7": 'armv7h',
+    "armv7": "armv7h",
 }

 DEFAULT_IMAGE_SECTOR_SIZE = 512
@@ -30,7 +30,7 @@ class DeviceInfo(DictScheme):

     @classmethod
     def transform(cls, values: Mapping[str, Optional[str]], **kwargs):
-        kwargs = {'allow_extra': True} | kwargs
+        kwargs = {"allow_extra": True} | kwargs
         return super().transform(values, **kwargs)


@@ -45,7 +45,6 @@ deviceinfo_attributes = [
     "dtb",
     "modules_initfs",
     "arch",
-
     # device
     "chassis",
     "keyboard",
@@ -55,11 +54,9 @@ deviceinfo_attributes = [
     "dev_touchscreen",
     "dev_touchscreen_calibration",
     "append_dtb",
-
     # bootloader
     "flash_method",
     "boot_filesystem",
-
     # flash
     "flash_heimdall_partition_kernel",
     "flash_heimdall_partition_initfs",
@@ -96,10 +93,8 @@ deviceinfo_attributes = [
     "cgpt_kpart",
     "cgpt_kpart_start",
     "cgpt_kpart_size",
-
     # weston
     "weston_pixman_type",
-
     # keymaps
     "keymaps",
 ]
@@ -123,67 +118,95 @@ def sanity_check(deviceinfo: dict[str, Optional[str]], device_name: str):
     try:
         _pmos_sanity_check(deviceinfo, device_name)
     except RuntimeError as err:
-        raise Exception(f"{device_name}: The postmarketOS checker for deviceinfo files has run into an issue.\n"
-                        "Here at kupfer, we usually don't maintain our own deviceinfo files "
-                        "and instead often download them postmarketOS in our PKGBUILDs.\n"
-                        "Please make sure your PKGBUILDs.git is up to date. (run `kupferbootstrap packages update`)\n"
-                        "If the problem persists, please open an issue for this device's deviceinfo file "
-                        "in the kupfer pkgbuilds git repo on Gitlab.\n\n"
-                        "postmarketOS error message (referenced file may not exist until you run makepkg in that directory):\n"
-                        f"{err}")
+        raise Exception(
+            f"{device_name}: The postmarketOS checker for deviceinfo files has run into an issue.\n"
+            "Here at kupfer, we usually don't maintain our own deviceinfo files "
+            "and instead often download them from postmarketOS in our PKGBUILDs.\n"
+            "Please make sure your PKGBUILDs.git is up to date. (run `kupferbootstrap packages update`)\n"
+            "If the problem persists, please open an issue for this device's deviceinfo file "
+            "in the kupfer pkgbuilds git repo on Gitlab.\n\n"
+            "postmarketOS error message (referenced file may not exist until you run makepkg in that directory):\n"
+            f"{err}"
+        )


 def _pmos_sanity_check(info: dict[str, Optional[str]], device_name: str):
     # Resolve path for more readable error messages
-    path = os.path.join(config.get_path('pkgbuilds'), 'device', device_name, 'deviceinfo')
+    path = os.path.join(
+        config.get_path("pkgbuilds"), "device", device_name, "deviceinfo"
+    )

     # Legacy errors
     if "flash_methods" in info:
-        raise RuntimeError("deviceinfo_flash_methods has been renamed to"
-                           " deviceinfo_flash_method. 
Please adjust your" - " deviceinfo file: " + path) + raise RuntimeError( + "deviceinfo_flash_methods has been renamed to" + " deviceinfo_flash_method. Please adjust your" + " deviceinfo file: " + path + ) if "external_disk" in info or "external_disk_install" in info: - raise RuntimeError("Instead of deviceinfo_external_disk and" - " deviceinfo_external_disk_install, please use the" - " new variable deviceinfo_external_storage in your" - " deviceinfo file: " + path) + raise RuntimeError( + "Instead of deviceinfo_external_disk and" + " deviceinfo_external_disk_install, please use the" + " new variable deviceinfo_external_storage in your" + " deviceinfo file: " + path + ) if "msm_refresher" in info: - raise RuntimeError("It is enough to specify 'msm-fb-refresher' in the" - " depends of your device's package now. Please" - " delete the deviceinfo_msm_refresher line in: " + path) + raise RuntimeError( + "It is enough to specify 'msm-fb-refresher' in the" + " depends of your device's package now. Please" + " delete the deviceinfo_msm_refresher line in: " + path + ) if "flash_fastboot_vendor_id" in info: - raise RuntimeError("Fastboot doesn't allow specifying the vendor ID" - " anymore (#1830). Try removing the" - " 'deviceinfo_flash_fastboot_vendor_id' line in: " + path + " (if you are sure that " - " you need this, then we can probably bring it back to fastboot, just" - " let us know in the postmarketOS issues!)") + raise RuntimeError( + "Fastboot doesn't allow specifying the vendor ID" + " anymore (#1830). Try removing the" + " 'deviceinfo_flash_fastboot_vendor_id' line in: " + + path + + " (if you are sure that " + " you need this, then we can probably bring it back to fastboot, just" + " let us know in the postmarketOS issues!)" + ) if "nonfree" in info: - raise RuntimeError("deviceinfo_nonfree is unused. " - "Please delete it in: " + path) + raise RuntimeError( + "deviceinfo_nonfree is unused. Please delete it in: " + path + ) if "dev_keyboard" in info: - raise RuntimeError("deviceinfo_dev_keyboard is unused. " - "Please delete it in: " + path) + raise RuntimeError( + "deviceinfo_dev_keyboard is unused. Please delete it in: " + path + ) if "date" in info: - raise RuntimeError("deviceinfo_date was replaced by deviceinfo_year. " - "Set it to the release year in: " + path) + raise RuntimeError( + "deviceinfo_date was replaced by deviceinfo_year. 
" + "Set it to the release year in: " + path + ) # "codename" is required codename = os.path.basename(os.path.dirname(path)) if codename.startswith("device-"): codename = codename[7:] # kupfer prepends the SoC - codename_alternative = codename.split('-', maxsplit=1)[1] if codename.count('-') > 1 else codename - _codename = info.get('codename', None) - if not _codename or not (_codename in [codename, codename_alternative] or codename.startswith(_codename) or - codename_alternative.startswith(_codename)): - raise RuntimeError(f"Please add 'deviceinfo_codename=\"{codename}\"' " - f"to: {path}") + codename_alternative = ( + codename.split("-", maxsplit=1)[1] + if codename.count("-") > 1 + else codename + ) + _codename = info.get("codename", None) + if not _codename or not ( + _codename in [codename, codename_alternative] + or codename.startswith(_codename) + or codename_alternative.startswith(_codename) + ): + raise RuntimeError( + f"Please add 'deviceinfo_codename=\"{codename}\"' to: {path}" + ) # "chassis" is required chassis_types = deviceinfo_chassis_types if "chassis" not in info or not info["chassis"]: - logging.info("NOTE: the most commonly used chassis types in" - " postmarketOS are 'handset' (for phones) and 'tablet'.") + logging.info( + "NOTE: the most commonly used chassis types in" + " postmarketOS are 'handset' (for phones) and 'tablet'." + ) raise RuntimeError(f"Please add 'deviceinfo_chassis' to: {path}") # "arch" is required @@ -193,12 +216,16 @@ def _pmos_sanity_check(info: dict[str, Optional[str]], device_name: str): # "chassis" validation chassis_type = info["chassis"] if chassis_type not in chassis_types: - raise RuntimeError(f"Unknown chassis type '{chassis_type}', should" - f" be one of {', '.join(chassis_types)}. Fix this" - f" and try again: {path}") + raise RuntimeError( + f"Unknown chassis type '{chassis_type}', should" + f" be one of {', '.join(chassis_types)}. Fix this" + f" and try again: {path}" + ) -def parse_kernel_suffix(deviceinfo: dict[str, Optional[str]], kernel: str = 'mainline') -> dict[str, Optional[str]]: +def parse_kernel_suffix( + deviceinfo: dict[str, Optional[str]], kernel: str = "mainline" +) -> dict[str, Optional[str]]: """ Remove the kernel suffix (as selected in 'pmbootstrap init') from deviceinfo variables. 
Related: @@ -231,12 +258,14 @@ def parse_kernel_suffix(deviceinfo: dict[str, Optional[str]], kernel: str = 'mai # Move ret[key_kernel] to ret[key] logging.debug(f"parse_kernel_suffix: {key_kernel} => {key}") ret[key] = ret[key_kernel] - del (ret[key_kernel]) + del ret[key_kernel] return ret -def parse_deviceinfo(deviceinfo_lines: list[str], device_name: str, kernel='mainline') -> DeviceInfo: +def parse_deviceinfo( + deviceinfo_lines: list[str], device_name: str, kernel="mainline" +) -> DeviceInfo: """ :param device: defaults to args.device :param kernel: defaults to args.kernel @@ -250,10 +279,12 @@ def parse_deviceinfo(deviceinfo_lines: list[str], device_name: str, kernel='main raise SyntaxError(f"{device_name}: No '=' found:\n\t{line}") split = line.split("=", 1) if not split[0].startswith("deviceinfo_"): - logging.warning(f"{device_name}: Unknown key {split[0]} in deviceinfo:\n{line}") + logging.warning( + f"{device_name}: Unknown key {split[0]} in deviceinfo:\n{line}" + ) continue - key = split[0][len("deviceinfo_"):] - value = split[1].replace("\"", "").replace("\n", "") + key = split[0][len("deviceinfo_") :] + value = split[1].replace('"', "").replace("\n", "") info[key] = value # Assign empty string as default @@ -263,8 +294,8 @@ def parse_deviceinfo(deviceinfo_lines: list[str], device_name: str, kernel='main info = parse_kernel_suffix(info, kernel) sanity_check(info, device_name) - if 'arch' in info: - arch = info['arch'] - info['arch'] = PMOS_ARCHES_OVERRIDES.get(arch, arch) # type: ignore[arg-type] + if "arch" in info: + arch = info["arch"] + info["arch"] = PMOS_ARCHES_OVERRIDES.get(arch, arch) # type: ignore[arg-type] dev = DeviceInfo.fromDict(info, allow_extra=True) return dev diff --git a/src/kupferbootstrap/dictscheme.py b/src/kupferbootstrap/dictscheme.py index 38d0dc9..b938c0f 100644 --- a/src/kupferbootstrap/dictscheme.py +++ b/src/kupferbootstrap/dictscheme.py @@ -5,12 +5,25 @@ import toml from munch import Munch from toml.encoder import TomlEncoder, TomlPreserveInlineDictEncoder -from typing import ClassVar, Generator, Optional, Union, Mapping, Any, get_type_hints, get_origin, get_args, Iterable +from typing import ( + ClassVar, + Generator, + Optional, + Union, + Mapping, + Any, + get_type_hints, + get_origin, + get_args, + Iterable, +) from .typehelpers import UnionType, NoneType -def resolve_type_hint(hint: type, ignore_origins: list[type] = []) -> Iterable[type]: +def resolve_type_hint( + hint: type, ignore_origins: list[type] = [] +) -> Iterable[type]: origin = get_origin(hint) args: Iterable[type] = get_args(hint) if origin in ignore_origins: @@ -46,13 +59,20 @@ def resolve_dict_hints(hints: Any) -> Generator[tuple[Any, ...], None, None]: class DictScheme(Munch): - _type_hints: ClassVar[dict[str, Any]] _strip_hidden: ClassVar[bool] = False _sparse: ClassVar[bool] = False - def __init__(self, d: Mapping = {}, validate: bool = True, allow_extra: bool = False, **kwargs): - self.update(dict(d) | kwargs, validate=validate, allow_extra=allow_extra) + def __init__( + self, + d: Mapping = {}, + validate: bool = True, + allow_extra: bool = False, + **kwargs, + ): + self.update( + dict(d) | kwargs, validate=validate, allow_extra=allow_extra + ) @classmethod def transform( @@ -75,7 +95,9 @@ class DictScheme(Munch): results[key] = None continue if issubclass(_classes[0], dict): - assert isinstance(value, dict) or (optional and value is None), f'{key=} is not dict: {value!r}, {_classes=}' + assert isinstance(value, dict) or ( + optional and value is None + ), f"{key=} is not 
dict: {value!r}, {_classes=}" target_class = _classes[0] if target_class in [None, NoneType, Optional]: for target in _classes[1:]: @@ -90,8 +112,12 @@ class DictScheme(Munch): raise Exception(msg) logging.warning(msg) if len(dict_hints) == 1 and value is not None: - if len(dict_hints[0]) != 2 or not all(dict_hints[0]): - logging.debug(f"Weird dict hints received: {dict_hints}") + if len(dict_hints[0]) != 2 or not all( + dict_hints[0] + ): + logging.debug( + f"Weird dict hints received: {dict_hints}" + ) continue key_type, value_type = dict_hints[0] if not isinstance(value, Mapping): @@ -111,23 +137,44 @@ class DictScheme(Munch): logging.warning(msg) if validate: for k in value: - if not isinstance(k, tuple(flatten_hints(key_type))): - raise Exception(f'Subdict "{key}": wrong type for subkey "{k}": got: {type(k)}, expected: {key_type}') + if not isinstance( + k, tuple(flatten_hints(key_type)) + ): + raise Exception( + f'Subdict "{key}": wrong type for subkey "{k}": got: {type(k)}, expected: {key_type}' + ) dict_content_hints = {k: value_type for k in value} - value = cls.transform(value, validate=validate, allow_extra=allow_extra, type_hints=dict_content_hints) + value = cls.transform( + value, + validate=validate, + allow_extra=allow_extra, + type_hints=dict_content_hints, + ) if not isinstance(value, target_class): if not (optional and value is None): assert issubclass(target_class, Munch) # despite the above assert, mypy doesn't seem to understand target_class is a Munch here - kwargs = {'validate': validate} if issubclass(target_class, DictScheme) else {} + kwargs = ( + {"validate": validate} + if issubclass(target_class, DictScheme) + else {} + ) value = target_class(value, **kwargs) # type:ignore[attr-defined] else: # print(f"nothing to do: '{key}' was already {target_class}) pass # handle numerics - elif set(_classes).intersection([int, float]) and isinstance(value, str) and str not in _classes: + elif ( + set(_classes).intersection([int, float]) + and isinstance(value, str) + and str not in _classes + ): parsed_number = None - parsers: list[tuple[type, list]] = [(int, [10]), (int, [0]), (float, [])] + parsers: list[tuple[type, list]] = [ + (int, [10]), + (int, [0]), + (float, []), + ] for _cls, args in parsers: if _cls not in _classes: continue @@ -138,15 +185,23 @@ class DictScheme(Munch): continue if parsed_number is None: if validate: - raise Exception(f"Couldn't parse string value {repr(value)} for key '{key}' into number formats: " + - (', '.join(list(c.__name__ for c in _classes)))) + raise Exception( + f"Couldn't parse string value {repr(value)} for key '{key}' into number formats: " + + ( + ", ".join( + list(c.__name__ for c in _classes) + ) + ) + ) else: value = parsed_number if validate: if not isinstance(value, _classes): - raise Exception(f'key "{key}" has value of wrong type! expected: ' - f'{" ,".join([ c.__name__ for c in _classes])}; ' - f'got: {type(value).__name__}; value: {value}') + raise Exception( + f'key "{key}" has value of wrong type! 
expected: ' + f"{' ,'.join([c.__name__ for c in _classes])}; " + f"got: {type(value).__name__}; value: {value}" + ) elif validate and not allow_extra: logging.debug(f"{cls}: unknown key '{key}': {value}") raise Exception(f'{cls}: Unknown key "{key}"') @@ -156,13 +211,17 @@ class DictScheme(Munch): results[key] = value if values: if validate: - raise Exception(f'values contained unknown keys: {list(values.keys())}') + raise Exception( + f"values contained unknown keys: {list(values.keys())}" + ) results |= values return results @classmethod - def fromDict(cls, values: Mapping[str, Any], validate: bool = True, **kwargs): + def fromDict( + cls, values: Mapping[str, Any], validate: bool = True, **kwargs + ): return cls(d=values, validate=validate, **kwargs) def toDict( @@ -189,7 +248,9 @@ class DictScheme(Munch): ) -> dict[Any, Any]: # preserve original None-type args _sparse = cls._sparse if sparse is None else sparse - _strip_hidden = cls._strip_hidden if strip_hidden is None else strip_hidden + _strip_hidden = ( + cls._strip_hidden if strip_hidden is None else strip_hidden + ) hints = cls._type_hints if hints is None else hints result = dict(d) if not (_strip_hidden or _sparse or result): @@ -202,7 +263,7 @@ class DictScheme(Munch): raise Exception(msg) logging.warning(f"{msg} (skipping)") continue - if _strip_hidden and k.startswith('_'): + if _strip_hidden and k.startswith("_"): result.pop(k) continue if v is None: @@ -220,7 +281,9 @@ class DictScheme(Munch): continue if isinstance(v, DictScheme): # pass None in sparse and strip_hidden - result[k] = v.toDict(strip_hidden=strip_hidden, sparse=sparse) + result[k] = v.toDict( + strip_hidden=strip_hidden, sparse=sparse + ) continue if isinstance(v, Munch): result[k] = v.toDict() @@ -251,15 +314,29 @@ class DictScheme(Munch): ) return result - def update(self, d: Mapping[str, Any], validate: bool = True, allow_extra: bool = False): - Munch.update(self, type(self).transform(d, validate=validate, allow_extra=allow_extra)) + def update( + self, + d: Mapping[str, Any], + validate: bool = True, + allow_extra: bool = False, + ): + Munch.update( + self, + type(self).transform( + d, validate=validate, allow_extra=allow_extra + ), + ) def __init_subclass__(cls): super().__init_subclass__() - cls._type_hints = {name: hint for name, hint in get_type_hints(cls).items() if get_origin(hint) is not ClassVar} + cls._type_hints = { + name: hint + for name, hint in get_type_hints(cls).items() + if get_origin(hint) is not ClassVar + } def __repr__(self): - return f'{type(self)}{dict.__repr__(dict(self))}' + return f"{type(self)}{dict.__repr__(dict(self))}" def toYAML( self, @@ -268,7 +345,8 @@ class DictScheme(Munch): **yaml_args, ) -> str: import yaml - yaml_args = {'sort_keys': False} | yaml_args + + yaml_args = {"sort_keys": False} | yaml_args dumped = yaml.dump( self.toDict(strip_hidden=strip_hidden, sparse=sparse), **yaml_args, @@ -278,10 +356,10 @@ class DictScheme(Munch): return dumped def toToml( - self, - strip_hidden: Optional[bool] = None, - sparse: Optional[bool] = None, - encoder: Optional[TomlEncoder] = TomlPreserveInlineDictEncoder(), + self, + strip_hidden: Optional[bool] = None, + sparse: Optional[bool] = None, + encoder: Optional[TomlEncoder] = TomlPreserveInlineDictEncoder(), ) -> str: return toml.dumps( self.toDict(strip_hidden=strip_hidden, sparse=sparse), diff --git a/src/kupferbootstrap/distro/distro.py b/src/kupferbootstrap/distro/distro.py index 268957b..06091fe 100644 --- a/src/kupferbootstrap/distro/distro.py +++ 
b/src/kupferbootstrap/distro/distro.py @@ -3,12 +3,25 @@ import logging from enum import IntFlag from typing import Generic, Mapping, Optional, TypeVar -from kupferbootstrap.constants import Arch, ARCHES, REPOSITORIES, KUPFER_BRANCH_MARKER, KUPFER_HTTPS, CHROOT_PATHS +from kupferbootstrap.constants import ( + Arch, + ARCHES, + REPOSITORIES, + KUPFER_BRANCH_MARKER, + KUPFER_HTTPS, + CHROOT_PATHS, +) from kupferbootstrap.generator import generate_pacman_conf_body from kupferbootstrap.config.state import config from .repo import BinaryPackageType, RepoInfo, Repo, LocalRepo, RemoteRepo -from .repo_config import AbstrRepoConfig, BaseDistro, ReposConfigFile, REPOS_CONFIG_DEFAULT, get_repo_config as _get_repo_config +from .repo_config import ( + AbstrRepoConfig, + BaseDistro, + ReposConfigFile, + REPOS_CONFIG_DEFAULT, + get_repo_config as _get_repo_config, +) class DistroLocation(IntFlag): @@ -17,15 +30,17 @@ class DistroLocation(IntFlag): CHROOT = 3 -RepoType = TypeVar('RepoType', bound=Repo) +RepoType = TypeVar("RepoType", bound=Repo) class Distro(Generic[RepoType]): repos: Mapping[str, RepoType] arch: str - def __init__(self, arch: Arch, repo_infos: dict[str, RepoInfo], scan=False): - assert (arch in ARCHES) + def __init__( + self, arch: Arch, repo_infos: dict[str, RepoInfo], scan=False + ): + assert arch in ARCHES self.arch = arch self.repos = dict[str, RepoType]() for repo_name, repo_info in repo_infos.items(): @@ -42,20 +57,37 @@ class Distro(Generic[RepoType]): Repo(**kwargs) def get_packages(self) -> dict[str, BinaryPackageType]: - """ get packages from all repos, semantically overlaying them""" + """get packages from all repos, semantically overlaying them""" results = dict[str, BinaryPackageType]() for repo in list(self.repos.values())[::-1]: assert repo.packages is not None results.update(repo.packages) return results - def repos_config_snippet(self, extra_repos: Mapping[str, RepoInfo] = {}) -> str: + def repos_config_snippet( + self, extra_repos: Mapping[str, RepoInfo] = {} + ) -> str: extras: list[Repo] = [ - Repo(name, url_template=info.url_template, arch=self.arch, options=info.options, scan=False) for name, info in extra_repos.items() + Repo( + name, + url_template=info.url_template, + arch=self.arch, + options=info.options, + scan=False, + ) + for name, info in extra_repos.items() ] - return '\n\n'.join(repo.config_snippet() for repo in (extras + list(self.repos.values()))) + return "\n\n".join( + repo.config_snippet() + for repo in (extras + list(self.repos.values())) + ) - def get_pacman_conf(self, extra_repos: Mapping[str, RepoInfo] = {}, check_space: bool = True, in_chroot: bool = True): + def get_pacman_conf( + self, + extra_repos: Mapping[str, RepoInfo] = {}, + check_space: bool = True, + in_chroot: bool = True, + ): body = generate_pacman_conf_body(self.arch, check_space=check_space) return body + self.repos_config_snippet(extra_repos) @@ -72,20 +104,23 @@ class Distro(Generic[RepoType]): class LocalDistro(Distro[LocalRepo]): - def _create_repo(self, **kwargs) -> LocalRepo: return LocalRepo(**kwargs) class RemoteDistro(Distro[RemoteRepo]): - def _create_repo(self, **kwargs) -> RemoteRepo: return RemoteRepo(**kwargs) def get_kupfer(arch: str, url_template: str, scan: bool = False) -> Distro: - repos = {name: RepoInfo(url_template=url_template, options={'SigLevel': 'Never'}) for name in REPOSITORIES} - remote = not url_template.startswith('file://') + repos = { + name: RepoInfo( + url_template=url_template, options={"SigLevel": "Never"} + ) + for name in REPOSITORIES + } + 
remote = not url_template.startswith("file://") clss = RemoteDistro if remote else LocalDistro distro = clss( arch=arch, @@ -113,7 +148,9 @@ def reset_distro_caches(): cache.clear() -def get_kupfer_url(url: str = KUPFER_HTTPS, branch: Optional[str] = None) -> str: +def get_kupfer_url( + url: str = KUPFER_HTTPS, branch: Optional[str] = None +) -> str: """gets the repo URL for `branch`, getting branch from config if `None` is passed.""" branch = config.file.pacman.repo_branch if branch is None else branch return url.replace(KUPFER_BRANCH_MARKER, branch) @@ -137,20 +174,29 @@ def get_kupfer_repo_names(local) -> list[str]: return results -def get_RepoInfo(arch: Arch, repo_config: AbstrRepoConfig, default_url: Optional[str]) -> RepoInfo: +def get_RepoInfo( + arch: Arch, repo_config: AbstrRepoConfig, default_url: Optional[str] +) -> RepoInfo: url = repo_config.remote_url or default_url if isinstance(url, dict): if arch not in url and not default_url: - raise Exception(f"Invalid repo config: Architecture {arch} not in remote_url mapping: {url}") + raise Exception( + f"Invalid repo config: Architecture {arch} not in remote_url mapping: {url}" + ) url = url.get(arch, default_url) assert url return RepoInfo( url_template=get_kupfer_url(url), - options=repo_config.get('options', None) or {}, + options=repo_config.get("options", None) or {}, ) -def get_base_distro(arch: Arch, scan: bool = False, unsigned: bool = True, cache_db: bool = True) -> RemoteDistro: +def get_base_distro( + arch: Arch, + scan: bool = False, + unsigned: bool = True, + cache_db: bool = True, +) -> RemoteDistro: base_distros = get_repo_config().base_distros if base_distros is None or arch not in base_distros: base_distros = REPOS_CONFIG_DEFAULT.base_distros @@ -160,8 +206,12 @@ def get_base_distro(arch: Arch, scan: bool = False, unsigned: bool = True, cache repos = {} for repo, repo_config in distro_config.repos.items(): if unsigned: - repo_config['options'] = (repo_config.get('options', None) or {}) | {'SigLevel': 'Never'} - repos[repo] = get_RepoInfo(arch, repo_config, default_url=distro_config.remote_url) + repo_config["options"] = ( + repo_config.get("options", None) or {} + ) | {"SigLevel": "Never"} + repos[repo] = get_RepoInfo( + arch, repo_config, default_url=distro_config.remote_url + ) distro = RemoteDistro(arch=arch, repo_infos=repos, scan=False) if cache_db: @@ -188,16 +238,20 @@ def get_kupfer_distro( remote = True cache = _kupfer_https default_url = repo_config.remote_url or KUPFER_HTTPS - repos = {repo: get_RepoInfo(arch, conf, default_url) for repo, conf in repo_config.repos.items() if not conf.local_only} + repos = { + repo: get_RepoInfo(arch, conf, default_url) + for repo, conf in repo_config.repos.items() + if not conf.local_only + } cls = RemoteDistro elif location in [DistroLocation.CHROOT, DistroLocation.LOCAL]: if location == DistroLocation.CHROOT: cache = _kupfer_local_chroots - pkgdir = CHROOT_PATHS['packages'] + pkgdir = CHROOT_PATHS["packages"] else: assert location == DistroLocation.LOCAL cache = _kupfer_local - pkgdir = config.get_path('packages') + pkgdir = config.get_path("packages") default_url = f"file://{pkgdir}/$arch/$repo" cls = LocalDistro repos = {} @@ -232,13 +286,19 @@ def get_kupfer_distro( return item -def get_kupfer_https(arch: Arch, scan: bool = False, cache_db: bool = True) -> RemoteDistro: - d = get_kupfer_distro(arch, location=DistroLocation.REMOTE, scan=scan, cache_db=cache_db) +def get_kupfer_https( + arch: Arch, scan: bool = False, cache_db: bool = True +) -> RemoteDistro: + d = 
get_kupfer_distro( + arch, location=DistroLocation.REMOTE, scan=scan, cache_db=cache_db + ) assert isinstance(d, RemoteDistro) return d -def get_kupfer_local(arch: Optional[Arch] = None, scan: bool = False, in_chroot: bool = True) -> LocalDistro: +def get_kupfer_local( + arch: Optional[Arch] = None, scan: bool = False, in_chroot: bool = True +) -> LocalDistro: arch = arch or config.runtime.arch assert arch location = DistroLocation.CHROOT if in_chroot else DistroLocation.LOCAL diff --git a/src/kupferbootstrap/distro/package.py b/src/kupferbootstrap/distro/package.py index c2dc5c8..1709890 100644 --- a/src/kupferbootstrap/distro/package.py +++ b/src/kupferbootstrap/distro/package.py @@ -34,30 +34,34 @@ class BinaryPackage(PackageInfo): self.resolved_url = resolved_url def __repr__(self): - return f'{self.name}@{self.version}' + return f"{self.name}@{self.version}" @classmethod def parse_desc(clss, desc_str: str, resolved_repo_url=None): """Parses a desc file, returning a PackageInfo""" desc: dict[str, Union[str, list[str]]] = {} - for segment in f'\n{desc_str}'.split('\n%'): + for segment in f"\n{desc_str}".split("\n%"): if not segment.strip(): continue - key, elements = (e.strip() for e in segment.strip().split('%\n', 1)) - elements_split = elements.split('\n') - desc[key] = elements if len(elements_split) == 1 else elements_split + key, elements = ( + e.strip() for e in segment.strip().split("%\n", 1) + ) + elements_split = elements.split("\n") + desc[key] = ( + elements if len(elements_split) == 1 else elements_split + ) validated: dict[str, str] = {} - for key in ['NAME', 'VERSION', 'ARCH', 'FILENAME']: + for key in ["NAME", "VERSION", "ARCH", "FILENAME"]: assert key in desc value = desc[key] assert isinstance(value, str) validated[key] = value p = clss( - name=validated['NAME'], - version=validated['VERSION'], - arch=validated['ARCH'], - filename=validated['FILENAME'], - resolved_url='/'.join([resolved_repo_url, validated['FILENAME']]), + name=validated["NAME"], + version=validated["VERSION"], + arch=validated["ARCH"], + filename=validated["FILENAME"], + resolved_url="/".join([resolved_repo_url, validated["FILENAME"]]), ) p._desc = desc return p @@ -67,18 +71,20 @@ class BinaryPackage(PackageInfo): class LocalPackage(BinaryPackage): - def acquire(self) -> str: - assert self.resolved_url and self.filename and self.filename in self.resolved_url - path = f'{self.resolved_url.split("file://")[1]}' + assert ( + self.resolved_url + and self.filename + and self.filename in self.resolved_url + ) + path = f"{self.resolved_url.split('file://')[1]}" assert os.path.exists(path) or print(path) return path class RemotePackage(BinaryPackage): - def acquire(self, dest_dir: Optional[str] = None) -> str: - assert self.resolved_url and '.pkg.tar.' in self.resolved_url + assert self.resolved_url and ".pkg.tar." 
in self.resolved_url url = f"{self.resolved_url}" assert url @@ -87,7 +93,7 @@ class RemotePackage(BinaryPackage): dest_file_path = os.path.join(dest_dir, self.filename) logging.info(f"Trying to download package {url}") - with urlopen(url) as fsrc, open(dest_file_path, 'wb') as fdst: + with urlopen(url) as fsrc, open(dest_file_path, "wb") as fdst: copyfileobj(fsrc, fdst) logging.info(f"{self.filename} downloaded from repos") return dest_file_path diff --git a/src/kupferbootstrap/distro/repo.py b/src/kupferbootstrap/distro/repo.py index 1a50fa0..0570117 100644 --- a/src/kupferbootstrap/distro/repo.py +++ b/src/kupferbootstrap/distro/repo.py @@ -11,12 +11,12 @@ from kupferbootstrap.utils import download_file from .package import BinaryPackage, LocalPackage, RemotePackage -BinaryPackageType = TypeVar('BinaryPackageType', bound=BinaryPackage) +BinaryPackageType = TypeVar("BinaryPackageType", bound=BinaryPackage) def resolve_url(url_template, repo_name: str, arch: str): result = url_template - for template, replacement in {'$repo': repo_name, '$arch': arch}.items(): + for template, replacement in {"$repo": repo_name, "$arch": arch}.items(): result = result.replace(template, replacement) return result @@ -39,25 +39,29 @@ class Repo(RepoInfo, Generic[BinaryPackageType]): scanned: bool = False def resolve_url(self) -> str: - return resolve_url(self.url_template, repo_name=self.name, arch=self.arch) + return resolve_url( + self.url_template, repo_name=self.name, arch=self.arch + ) def scan(self, allow_failure: bool = False) -> bool: failed = False self.resolved_url = self.resolve_url() - self.remote = not self.resolved_url.startswith('file://') + self.remote = not self.resolved_url.startswith("file://") try: path = self.acquire_db_file() index = tarfile.open(path) except Exception as ex: if not allow_failure: raise ex - logging.error(f"Repo {self.name}, {self.arch}: Error acquiring repo DB: {ex!r}") + logging.error( + f"Repo {self.name}, {self.arch}: Error acquiring repo DB: {ex!r}" + ) return False - logging.debug(f'Parsing repo file at {path}') + logging.debug(f"Parsing repo file at {path}") for node in index.getmembers(): - if os.path.basename(node.name) == 'desc': + if os.path.basename(node.name) == "desc": pkgname = os.path.dirname(node.name) - logging.debug(f'Parsing desc file for {pkgname}') + logging.debug(f"Parsing desc file for {pkgname}") fd = index.extractfile(node) assert fd contents = fd.read().decode() @@ -66,7 +70,9 @@ class Repo(RepoInfo, Generic[BinaryPackageType]): except Exception as ex: if not allow_failure: raise ex - logging.error(f'Repo {self.name}, {self.arch}: Error parsing desc for "{pkgname}": {ex!r}') + logging.error( + f'Repo {self.name}, {self.arch}: Error parsing desc for "{pkgname}": {ex!r}' + ) failed = True continue self.packages[pkg.name] = pkg @@ -75,7 +81,9 @@ class Repo(RepoInfo, Generic[BinaryPackageType]): self.scanned = True return True - def _parse_desc(self, desc_text: str): # can't annotate the type properly :( + def _parse_desc( + self, desc_text: str + ): # can't annotate the type properly :( raise NotImplementedError() def parse_desc(self, desc_text: str) -> BinaryPackageType: @@ -84,7 +92,9 @@ class Repo(RepoInfo, Generic[BinaryPackageType]): def acquire_db_file(self) -> str: raise NotImplementedError - def __init__(self, name: str, url_template: str, arch: str, options={}, scan=False): + def __init__( + self, name: str, url_template: str, arch: str, options={}, scan=False + ): self.packages = {} self.name = name self.url_template = url_template @@ 
-94,23 +104,26 @@ class Repo(RepoInfo, Generic[BinaryPackageType]): self.scan() def __repr__(self): - return f'' + return f"" def config_snippet(self) -> str: - options = {'Server': self.url_template} | self.options - return ('[%s]\n' % self.name) + '\n'.join([f"{key} = {value}" for key, value in options.items()]) + options = {"Server": self.url_template} | self.options + return ("[%s]\n" % self.name) + "\n".join( + [f"{key} = {value}" for key, value in options.items()] + ) def get_RepoInfo(self): return RepoInfo(url_template=self.url_template, options=self.options) class LocalRepo(Repo[LocalPackage]): - def _parse_desc(self, desc_text: str) -> LocalPackage: - return LocalPackage.parse_desc(desc_text, resolved_repo_url=self.resolved_url) + return LocalPackage.parse_desc( + desc_text, resolved_repo_url=self.resolved_url + ) def acquire_db_file(self) -> str: - return f'{self.resolved_url}/{self.name}.db'.split('file://')[1] + return f"{self.resolved_url}/{self.name}.db".split("file://")[1] class RemoteRepo(Repo[RemotePackage]): @@ -121,14 +134,22 @@ class RemoteRepo(Repo[RemotePackage]): super().__init__(*kargs, **kwargs) def _parse_desc(self, desc_text: str) -> RemotePackage: - return RemotePackage.parse_desc(desc_text, resolved_repo_url=self.resolved_url) + return RemotePackage.parse_desc( + desc_text, resolved_repo_url=self.resolved_url + ) def acquire_db_file(self) -> str: - uri = f'{self.resolved_url}/{self.name}.db' - logging.info(f'Downloading repo file from {uri}') - assert self.arch and self.name, f"repo has incomplete information: {self.name=}, {self.arch=}" - path = get_temp_dir() if not self.cache_repo_db else os.path.join(config.get_path('pacman'), 'repo_dbs', self.arch) + uri = f"{self.resolved_url}/{self.name}.db" + logging.info(f"Downloading repo file from {uri}") + assert self.arch and self.name, ( + f"repo has incomplete information: {self.name=}, {self.arch=}" + ) + path = ( + get_temp_dir() + if not self.cache_repo_db + else os.path.join(config.get_path("pacman"), "repo_dbs", self.arch) + ) os.makedirs(path, exist_ok=True) - repo_file = f'{path}/{self.name}.tar.gz' + repo_file = f"{path}/{self.name}.tar.gz" download_file(repo_file, uri, update=True) return repo_file diff --git a/src/kupferbootstrap/distro/repo_config.py b/src/kupferbootstrap/distro/repo_config.py index a58c40f..de85979 100644 --- a/src/kupferbootstrap/distro/repo_config.py +++ b/src/kupferbootstrap/distro/repo_config.py @@ -9,15 +9,26 @@ from copy import deepcopy from typing import ClassVar, Optional, Mapping, Union from ..config.state import config -from ..constants import Arch, BASE_DISTROS, KUPFER_HTTPS, REPOS_CONFIG_FILE, REPOS_CONFIG_FILE_USER, REPOSITORIES -from ..dictscheme import DictScheme, toml_inline_dicts, TomlPreserveInlineDictEncoder +from ..constants import ( + Arch, + BASE_DISTROS, + KUPFER_HTTPS, + REPOS_CONFIG_FILE, + REPOS_CONFIG_FILE_USER, + REPOSITORIES, +) +from ..dictscheme import ( + DictScheme, + toml_inline_dicts, + TomlPreserveInlineDictEncoder, +) from ..utils import sha256sum -REPOS_KEY = 'repos' -REMOTEURL_KEY = 'remote_url' -LOCALONLY_KEY = 'local_only' -OPTIONS_KEY = 'options' -BASEDISTROS_KEY = 'base_distros' +REPOS_KEY = "repos" +REMOTEURL_KEY = "remote_url" +LOCALONLY_KEY = "local_only" +OPTIONS_KEY = "options" +BASEDISTROS_KEY = "base_distros" _current_config: Optional[ReposConfigFile] @@ -57,14 +68,29 @@ class ReposConfigFile(DictScheme): super().__init__(d=d, allow_extra=allow_extra, **kwargs) self[REPOS_KEY] = self.get(REPOS_KEY, {}) for repo_cls, defaults, repos, 
remote_url in [ - (RepoConfig, REPO_DEFAULTS, self.get(REPOS_KEY), d.get(REMOTEURL_KEY, None)), - *[(BaseDistroRepo, BASE_DISTRO_DEFAULTS, _distro.repos, _distro.get(REMOTEURL_KEY, None)) for _distro in self.base_distros.values()], + ( + RepoConfig, + REPO_DEFAULTS, + self.get(REPOS_KEY), + d.get(REMOTEURL_KEY, None), + ), + *[ + ( + BaseDistroRepo, + BASE_DISTRO_DEFAULTS, + _distro.repos, + _distro.get(REMOTEURL_KEY, None), + ) + for _distro in self.base_distros.values() + ], ]: if repos is None: continue for name, repo in repos.items(): _repo = dict(defaults | (repo or {})) # type: ignore[operator] - if REMOTEURL_KEY not in repo and not repo.get(LOCALONLY_KEY, None): + if REMOTEURL_KEY not in repo and not repo.get( + LOCALONLY_KEY, None + ): _repo[REMOTEURL_KEY] = remote_url repos[name] = repo_cls(_repo, **kwargs) @@ -74,21 +100,29 @@ class ReposConfigFile(DictScheme): @staticmethod def parse_config(path: str) -> ReposConfigFile: try: - with open(path, 'r') as fd: + with open(path, "r") as fd: data = yaml.safe_load(fd) - data['_path'] = path - data['_checksum'] = sha256sum(path) + data["_path"] = path + data["_checksum"] = sha256sum(path) return ReposConfigFile(data, validate=True) except Exception as ex: logging.error(f'Error parsing repos config at "{path}":\n{ex}') raise ex - def toToml(self, strip_hidden=None, sparse=None, encoder=TomlPreserveInlineDictEncoder()): + def toToml( + self, + strip_hidden=None, + sparse=None, + encoder=TomlPreserveInlineDictEncoder(), + ): d = self.toDict(strip_hidden=strip_hidden, sparse=sparse) for key in [REPOS_KEY]: if key not in d or not isinstance(d[key], Mapping): continue - inline = {name: {k: toml_inline_dicts(v) for k, v in value.items()} for name, value in d[key].items()} + inline = { + name: {k: toml_inline_dicts(v) for k, v in value.items()} + for name, value in d[key].items() + } logging.info(f"Inlined {key}: {inline}") d[key] = inline return toml.dumps(d, encoder=encoder) @@ -97,9 +131,7 @@ class ReposConfigFile(DictScheme): REPO_DEFAULTS = { LOCALONLY_KEY: None, REMOTEURL_KEY: None, - OPTIONS_KEY: { - 'SigLevel': 'Never' - }, + OPTIONS_KEY: {"SigLevel": "Never"}, } BASE_DISTRO_DEFAULTS = { @@ -107,29 +139,26 @@ BASE_DISTRO_DEFAULTS = { OPTIONS_KEY: None, } -REPOS_CONFIG_DEFAULT = ReposConfigFile({ - '_path': '__DEFAULTS__', - '_checksum': None, - REMOTEURL_KEY: KUPFER_HTTPS, - REPOS_KEY: { - 'kupfer_local': REPO_DEFAULTS | { - LOCALONLY_KEY: True +REPOS_CONFIG_DEFAULT = ReposConfigFile( + { + "_path": "__DEFAULTS__", + "_checksum": None, + REMOTEURL_KEY: KUPFER_HTTPS, + REPOS_KEY: { + "kupfer_local": REPO_DEFAULTS | {LOCALONLY_KEY: True}, + **{r: deepcopy(REPO_DEFAULTS) for r in REPOSITORIES}, }, - **{ - r: deepcopy(REPO_DEFAULTS) for r in REPOSITORIES + BASEDISTROS_KEY: { + arch: { + REMOTEURL_KEY: None, + "repos": { + k: {"remote_url": v} for k, v in arch_def["repos"].items() + }, + } + for arch, arch_def in BASE_DISTROS.items() }, - }, - BASEDISTROS_KEY: { - arch: { - REMOTEURL_KEY: None, - 'repos': { - k: { - 'remote_url': v - } for k, v in arch_def['repos'].items() - }, - } for arch, arch_def in BASE_DISTROS.items() - }, -}) + } +) _current_config = None @@ -139,9 +168,13 @@ def get_repo_config( repo_config_file: Optional[str] = None, ) -> tuple[ReposConfigFile, bool]: global _current_config - user_repo_config = os.path.join(config.get_path('pkgbuilds'), REPOS_CONFIG_FILE_USER) + user_repo_config = os.path.join( + config.get_path("pkgbuilds"), REPOS_CONFIG_FILE_USER + ) if repo_config_file is None: - repo_config_file_path = 
os.path.join(config.get_path('pkgbuilds'), REPOS_CONFIG_FILE) + repo_config_file_path = os.path.join( + config.get_path("pkgbuilds"), REPOS_CONFIG_FILE + ) if os.path.exists(user_repo_config): repo_config_file_path = user_repo_config else: @@ -150,18 +183,28 @@ def get_repo_config( if not config_exists and _current_config is None: if initialize_pkgbuilds: from ..packages.pkgbuild import init_pkgbuilds + init_pkgbuilds(update=False) - return get_repo_config(initialize_pkgbuilds=False, repo_config_file=repo_config_file) + return get_repo_config( + initialize_pkgbuilds=False, repo_config_file=repo_config_file + ) if repo_config_file is not None: - raise Exception(f"Requested repo config {repo_config_file} doesn't exist") - logging.warning(f"{repo_config_file_path} doesn't exist, using built-in repo config defaults") + raise Exception( + f"Requested repo config {repo_config_file} doesn't exist" + ) + logging.warning( + f"{repo_config_file_path} doesn't exist, using built-in repo config defaults" + ) _current_config = deepcopy(REPOS_CONFIG_DEFAULT) return _current_config, False if os.path.exists(user_repo_config): repo_config_file_path = user_repo_config config_exists = True changed = False - if (not _current_config) or (config_exists and _current_config._checksum != sha256sum(repo_config_file_path)): + if (not _current_config) or ( + config_exists + and _current_config._checksum != sha256sum(repo_config_file_path) + ): if config_exists: conf = ReposConfigFile.parse_config(repo_config_file_path) else: diff --git a/src/kupferbootstrap/exec/cmd.py b/src/kupferbootstrap/exec/cmd.py index cf2af75..23cce39 100644 --- a/src/kupferbootstrap/exec/cmd.py +++ b/src/kupferbootstrap/exec/cmd.py @@ -3,7 +3,9 @@ import os import pwd import subprocess -from subprocess import CompletedProcess # make it easy for users of this module +from subprocess import ( + CompletedProcess, +) # make it easy for users of this module from shlex import quote as shell_quote from typing import IO, Optional, Union @@ -20,15 +22,19 @@ ELEVATION_METHOD_DEFAULT = "sudo" ELEVATION_METHODS: dict[ElevationMethod, list[str]] = { "none": [], - "sudo": ['sudo', '--'], + "sudo": ["sudo", "--"], } def generate_env_cmd(env: dict[str, str]): - return ['/usr/bin/env'] + [f'{key}={value}' for key, value in env.items()] + return ["/usr/bin/env"] + [f"{key}={value}" for key, value in env.items()] -def flatten_shell_script(script: Union[list[str], str], shell_quote_items: bool = False, wrap_in_shell_quote=False) -> str: +def flatten_shell_script( + script: Union[list[str], str], + shell_quote_items: bool = False, + wrap_in_shell_quote=False, +) -> str: """ takes a shell-script and returns a flattened string for consumption with `sh -c`. 
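As a rough sketch of the contract described in that docstring (behavior inferred from the function body in the hunk below; the example inputs are made up):

    from kupferbootstrap.exec.cmd import flatten_shell_script, wrap_in_bash

    # list items are joined with spaces; empty items survive as '""'
    flatten_shell_script(["echo", "hello", ""])  # -> 'echo hello ""'
    # shell_quote_items=True quotes each item, so embedded whitespace survives
    flatten_shell_script(["echo", "a b"], shell_quote_items=True)  # -> "echo 'a b'"
    # wrap_in_bash() builds on this, producing an argv for `bash -c`
    wrap_in_bash(["echo", "hi"], flatten_result=False)  # -> ['/bin/bash', '-c', 'echo hi']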
@@ -41,21 +47,33 @@ def flatten_shell_script(script: Union[list[str], str], shell_quote_items: bool if shell_quote_items: cmds = [shell_quote(i) for i in cmds] else: - cmds = [(i if i != '' else '""') for i in cmds] + cmds = [(i if i != "" else '""') for i in cmds] script = " ".join(cmds) if wrap_in_shell_quote: script = shell_quote(script) return script -def wrap_in_bash(cmd: Union[list[str], str], flatten_result=True) -> Union[str, list[str]]: - res: Union[str, list[str]] = ['/bin/bash', '-c', flatten_shell_script(cmd, shell_quote_items=False, wrap_in_shell_quote=False)] +def wrap_in_bash( + cmd: Union[list[str], str], flatten_result=True +) -> Union[str, list[str]]: + res: Union[str, list[str]] = [ + "/bin/bash", + "-c", + flatten_shell_script( + cmd, shell_quote_items=False, wrap_in_shell_quote=False + ), + ] if flatten_result: - res = flatten_shell_script(res, shell_quote_items=True, wrap_in_shell_quote=False) + res = flatten_shell_script( + res, shell_quote_items=True, wrap_in_shell_quote=False + ) return res -def generate_cmd_elevated(cmd: Union[list[str], str], elevation_method: ElevationMethod): +def generate_cmd_elevated( + cmd: Union[list[str], str], elevation_method: ElevationMethod +): "wraps `cmd` in the necessary commands to escalate, e.g. `['sudo', '--', cmd]`." if isinstance(cmd, str): cmd = wrap_in_bash(cmd, flatten_result=False) @@ -79,10 +97,21 @@ def generate_cmd_su( """ current_uid = os.getuid() if pwd.getpwuid(current_uid).pw_name != switch_user or force_su: - if switch_user != 'root' or force_su: - cmd = ['/bin/su', switch_user, '-s', '/bin/bash', '-c', flatten_shell_script(cmd, shell_quote_items=True)] - if current_uid != 0 or force_elevate: # in order to use `/bin/su`, we have to be root first. - cmd = generate_cmd_elevated(cmd, elevation_method or ELEVATION_METHOD_DEFAULT) + if switch_user != "root" or force_su: + cmd = [ + "/bin/su", + switch_user, + "-s", + "/bin/bash", + "-c", + flatten_shell_script(cmd, shell_quote_items=True), + ] + if ( + current_uid != 0 or force_elevate + ): # in order to use `/bin/su`, we have to be root first. 
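+            # cmd may already be wrapped in /bin/su at this point; prepending
+            # the elevation command (['sudo', '--'] by default) makes that
+            # /bin/su invocation itself run as root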
+ cmd = generate_cmd_elevated( + cmd, elevation_method or ELEVATION_METHOD_DEFAULT + ) return cmd @@ -103,22 +132,26 @@ def run_cmd( env_cmd = [] if env: env_cmd = generate_env_cmd(env) - kwargs['env'] = env + kwargs["env"] = env if not attach_tty: if (stdout, stderr) == (None, None): - kwargs['capture_output'] = capture_output + kwargs["capture_output"] = capture_output else: - for name, fd in {'stdout': stdout, 'stderr': stderr}.items(): + for name, fd in {"stdout": stdout, "stderr": stderr}.items(): if fd is not None: kwargs[name] = fd script = flatten_shell_script(script) if cwd: - kwargs['cwd'] = cwd + kwargs["cwd"] = cwd wrapped_script: list[str] = wrap_in_bash(script, flatten_result=False) # type: ignore cmd = env_cmd + wrapped_script if switch_user: - cmd = generate_cmd_su(cmd, switch_user, elevation_method=elevation_method) - logging.debug(f'Running cmd: "{cmd}"' + (f' (path: {repr(cwd)})' if cwd else '')) + cmd = generate_cmd_su( + cmd, switch_user, elevation_method=elevation_method + ) + logging.debug( + f'Running cmd: "{cmd}"' + (f" (path: {repr(cwd)})" if cwd else "") + ) if attach_tty: return subprocess.call(cmd, **kwargs) else: @@ -126,9 +159,9 @@ def run_cmd( def run_root_cmd(*kargs, **kwargs): - kwargs['switch_user'] = 'root' + kwargs["switch_user"] = "root" return run_cmd(*kargs, **kwargs) def elevation_noop(**kwargs): - run_root_cmd('/bin/true', **kwargs) + run_root_cmd("/bin/true", **kwargs) diff --git a/src/kupferbootstrap/exec/file.py b/src/kupferbootstrap/exec/file.py index a42ec90..62cdad9 100644 --- a/src/kupferbootstrap/exec/file.py +++ b/src/kupferbootstrap/exec/file.py @@ -8,64 +8,86 @@ from shutil import rmtree from tempfile import mkdtemp from typing import Optional, Union -from .cmd import run_cmd, run_root_cmd, elevation_noop, generate_cmd_su, wrap_in_bash, shell_quote +from .cmd import ( + run_cmd, + run_root_cmd, + elevation_noop, + generate_cmd_su, + wrap_in_bash, + shell_quote, +) from kupferbootstrap.utils import get_user_name, get_group_name -def try_native_filewrite(path: str, content: Union[str, bytes], chmod: Optional[str] = None) -> Optional[Exception]: +def try_native_filewrite( + path: str, content: Union[str, bytes], chmod: Optional[str] = None +) -> Optional[Exception]: "try writing with python open(), return None on success, return(!) 
Exception on failure"
-    bflag = 'b' if isinstance(content, bytes) else ''
+    bflag = "b" if isinstance(content, bytes) else ""
     try:
         kwargs = {}
         if chmod:
-            kwargs['mode'] = chmod
+            kwargs["mode"] = chmod
         descriptor = os.open(path, **kwargs)  # type: ignore
-        with open(descriptor, 'w' + bflag) as f:
+        with open(descriptor, "w" + bflag) as f:
             f.write(content)
     except Exception as ex:
         return ex
     return None


-def chown(path: str, user: Optional[Union[str, int]] = None, group: Optional[Union[str, int]] = None, recursive: bool = False):
-    owner = ''
+def chown(
+    path: str,
+    user: Optional[Union[str, int]] = None,
+    group: Optional[Union[str, int]] = None,
+    recursive: bool = False,
+):
+    owner = ""
     if user is not None:
         owner += get_user_name(user)
     if group is not None:
-        owner += f':{get_group_name(group)}'
+        owner += f":{get_group_name(group)}"
     if owner:
-        cmd = ["chown"] + (['-R'] if recursive else [])
+        cmd = ["chown"] + (["-R"] if recursive else [])
         result = run_root_cmd(cmd + [owner, path])
         assert isinstance(result, subprocess.CompletedProcess)
         if result.returncode:
             raise Exception(f"Failed to change owner of '{path}' to '{owner}'")


-def chmod(path, mode: Union[int, str] = 0o0755, force_sticky=True, privileged: bool = True):
+def chmod(
+    path,
+    mode: Union[int, str] = 0o0755,
+    force_sticky=True,
+    privileged: bool = True,
+):
     if not isinstance(mode, str):
         octal = oct(mode)[2:]
     else:
         octal = mode
     assert octal.isnumeric()
-    octal = octal.rjust(3, '0')
+    octal = octal.rjust(3, "0")
     if force_sticky:
-        octal = octal.rjust(4, '0')
+        octal = octal.rjust(4, "0")
     try:
         os.chmod(path, mode=octal)  # type: ignore
     except:
         cmd = ["chmod", octal, path]
-        result = run_cmd(cmd, switch_user='root' if privileged else None)
+        result = run_cmd(cmd, switch_user="root" if privileged else None)
         assert isinstance(result, subprocess.CompletedProcess)
         if result.returncode:
             raise Exception(f"Failed to set mode of '{path}' to '{chmod}'")


 def root_check_exists(path):
-    return os.path.exists(path) or run_root_cmd(['[', '-e', path, ']']).returncode == 0
+    return (
+        os.path.exists(path)
+        or run_root_cmd(["[", "-e", path, "]"]).returncode == 0
+    )


 def root_check_is_dir(path):
-    return os.path.isdir(path) or run_root_cmd(['[', '-d', path, ']'])
+    return (
+        os.path.isdir(path)
+        or run_root_cmd(["[", "-d", path, "]"]).returncode == 0
+    )


 def write_file(
@@ -76,7 +98,7 @@
     user: Optional[str] = None,
     group: Optional[str] = None,
 ):
-    chmod_mode = ''
+    chmod_mode = ""
     chown_user = get_user_name(user) if user else None
     chown_group = get_group_name(group) if group else None
     fstat: os.stat_result
@@ -97,17 +119,31 @@
         raise Exception(f"Error writing file {path}, parent dir {reason}")
     if mode:
         if not mode.isnumeric():
-            raise Exception(f"Unknown file mode '{mode}' (must be numeric): {path}")
-        if not exists or failed or stat.filemode(int(mode, 8)) != stat.filemode(fstat.st_mode):
+            raise Exception(
+                f"Unknown file mode '{mode}' (must be numeric): {path}"
+            )
+        if (
+            not exists
+            or failed
+            or stat.filemode(int(mode, 8)) != stat.filemode(fstat.st_mode)
+        ):
             chmod_mode = mode
     if not failed:
         failed = try_native_filewrite(path, content, chmod_mode) is not None
     if exists or failed:
         if failed:
             try:
-                elevation_noop(attach_tty=True)  # avoid password prompt while writing file
+                elevation_noop(
+                    attach_tty=True
+                )  # avoid password prompt while writing file
                 logging.debug(f"Writing to {path} using elevated /bin/tee")
-                cmd: list[str] = generate_cmd_su(wrap_in_bash(f'tee {shell_quote(path)} >/dev/null', flatten_result=False), 
'root')  # type: ignore
+                cmd: list[str] = generate_cmd_su(
+                    wrap_in_bash(
+                        f"tee {shell_quote(path)} >/dev/null",
+                        flatten_result=False,
+                    ),
+                    "root",
+                )  # type: ignore
                 assert isinstance(cmd, list)
                 s = subprocess.Popen(
                     cmd,
@@ -117,9 +153,13 @@
                 s.communicate(content)
                 s.wait(300)  # 5 minute timeout
                 if s.returncode:
-                    raise Exception(f"Write command excited non-zero: {s.returncode}")
+                    raise Exception(
+                        f"Write command exited non-zero: {s.returncode}"
+                    )
             except Exception as ex:
-                logging.fatal(f"Writing to file '{path}' with elevated privileges failed")
+                logging.fatal(
+                    f"Writing to file '{path}' with elevated privileges failed"
+                )
                 raise ex
     if chmod_mode:
         chmod(path, chmod_mode)
@@ -128,8 +168,8 @@


 def root_write_file(*args, **kwargs):
-    kwargs['user'] = 'root'
-    kwargs['group'] = 'root'
+    kwargs["user"] = "root"
+    kwargs["group"] = "root"
     return write_file(*args, **kwargs)


@@ -138,7 +178,7 @@ def remove_file(path: str, recursive=False):
         rm = rmtree if recursive else os.unlink
         rm(path)  # type: ignore
     except:
-        cmd = ['rm'] + (['-r'] if recursive else []) + [path]
+        cmd = ["rm"] + (["-r"] if recursive else []) + [path]
         rc = run_root_cmd(cmd).returncode
         if rc:
             raise Exception(f"Unable to remove {path}: cmd returned {rc}")
@@ -158,14 +198,14 @@ def makedir(
         else:
             os.mkdir(path)
     except:
-        run_root_cmd(['mkdir'] + (['-p'] if parents else []) + [path])
+        run_root_cmd(["mkdir"] + (["-p"] if parents else []) + [path])
     if mode is not None:
         chmod(path, mode=mode)
     chown(path, user, group)


 def root_makedir(path, parents: bool = True):
-    return makedir(path, user='root', group='root', parents=parents)
+    return makedir(path, user="root", group="root", parents=parents)


 def symlink(source, target):
@@ -173,10 +213,12 @@ def symlink(source, target):
     try:
         os.symlink(source, target)
     except:
-        result = run_root_cmd(['ln', '-s', source, target])
+        result = run_root_cmd(["ln", "-s", source, target])
         assert isinstance(result, subprocess.CompletedProcess)
         if result.returncode:
-            raise Exception(f'Symlink creation of {target} pointing at {source} failed')
+            raise Exception(
+                f"Symlink creation of {target} pointing at {source} failed"
+            )


 def get_temp_dir(register_cleanup=True, mode: int = 0o0755):
diff --git a/src/kupferbootstrap/flavours/cli.py b/src/kupferbootstrap/flavours/cli.py
index b983649..bb51fd1 100644
--- a/src/kupferbootstrap/flavours/cli.py
+++ b/src/kupferbootstrap/flavours/cli.py
@@ -11,19 +11,33 @@ from kupferbootstrap.version.cli import _check_kbs_version

 from .flavour import get_flavours, get_flavour

-profile_option = click.option('-p', '--profile', help="name of the profile to use", required=False, default=None)
+profile_option = click.option(
+    "-p",
+    "--profile",
+    help="name of the profile to use",
+    required=False,
+    default=None,
+)


-@click.command(name='flavours')
-@click.option('-j', '--json', is_flag=True, help='output machine-parsable JSON format')
-@click.option('--output-file', type=click.Path(exists=False, file_okay=True), help="Dump JSON to file")
+@click.command(name="flavours")
+@click.option(
+    "-j", "--json", is_flag=True, help="output machine-parsable JSON format"
+)
+@click.option(
+    "--output-file",
+    type=click.Path(exists=False, file_okay=True),
+    help="Dump JSON to file",
+)
 def cmd_flavours(json: bool = False, output_file: Optional[str] = None):
-    'list information about available flavours'
+    "list information about available flavours"
     results = []
     json_results = {}
     profile_flavour = None
     interactive_json = json and not 
output_file - use_colors = colors_supported(config.runtime.colors) and not interactive_json + use_colors = ( + colors_supported(config.runtime.colors) and not interactive_json + ) profile_name = config.file.profiles.current selected, inherited_from = None, None if output_file: @@ -34,28 +48,40 @@ def cmd_flavours(json: bool = False, output_file: Optional[str] = None): raise Exception("No flavours found!") if not interactive_json: try: - selected, inherited_from = resolve_profile_field(None, profile_name, 'flavour', config.file.profiles) + selected, inherited_from = resolve_profile_field( + None, profile_name, "flavour", config.file.profiles + ) if selected: profile_flavour = get_flavour(selected) except Exception as ex: - logging.debug(f"Failed to get profile flavour for marking as currently selected, continuing anyway. Exception: {ex}") + logging.debug( + f"Failed to get profile flavour for marking as currently selected, continuing anyway. Exception: {ex}" + ) for name in sorted(flavours.keys()): f = flavours[name] try: f.parse_flavourinfo() except Exception as ex: - logging.debug(f"A problem happened while parsing flavourinfo for {name}, continuing anyway. Exception: {ex}") + logging.debug( + f"A problem happened while parsing flavourinfo for {name}, continuing anyway. Exception: {ex}" + ) if not interactive_json: snippet = f.nice_str(newlines=True, colors=use_colors) if profile_flavour == f: - snippet = color_mark_selected(snippet, profile_name or '[unknown]', inherited_from) - snippet += '\n' - results += snippet.split('\n') + snippet = color_mark_selected( + snippet, profile_name or "[unknown]", inherited_from + ) + snippet += "\n" + results += snippet.split("\n") if json: d = dict(f) - d["description"] = f.flavour_info.description if (f.flavour_info and f.flavour_info.description) else f.description + d["description"] = ( + f.flavour_info.description + if (f.flavour_info and f.flavour_info.description) + else f.description + ) if "flavour_info" in d and d["flavour_info"]: - for k in set(d["flavour_info"].keys()) - set(['description']): + for k in set(d["flavour_info"].keys()) - set(["description"]): d[k] = d["flavour_info"][k] del d["flavour_info"] d["pkgbuild"] = f.pkgbuild.path if f.pkgbuild else None @@ -64,7 +90,7 @@ def cmd_flavours(json: bool = False, output_file: Optional[str] = None): json_results[name] = d print() if output_file: - with open(output_file, 'w') as fd: + with open(output_file, "w") as fd: fd.write(json_dump(json_results)) if interactive_json: print(json_dump(json_results, indent=4)) diff --git a/src/kupferbootstrap/flavours/flavour.py b/src/kupferbootstrap/flavours/flavour.py index d3dd89a..d0c6b32 100644 --- a/src/kupferbootstrap/flavours/flavour.py +++ b/src/kupferbootstrap/flavours/flavour.py @@ -7,9 +7,17 @@ import os from typing import Optional from kupferbootstrap.config.state import config -from kupferbootstrap.constants import FLAVOUR_DESCRIPTION_PREFIX, FLAVOUR_INFO_FILE +from kupferbootstrap.constants import ( + FLAVOUR_DESCRIPTION_PREFIX, + FLAVOUR_INFO_FILE, +) from kupferbootstrap.dictscheme import DictScheme -from kupferbootstrap.packages.pkgbuild import discover_pkgbuilds, get_pkgbuild_by_name, init_pkgbuilds, Pkgbuild +from kupferbootstrap.packages.pkgbuild import ( + discover_pkgbuilds, + get_pkgbuild_by_name, + init_pkgbuilds, + Pkgbuild, +) from kupferbootstrap.utils import color_str @@ -18,7 +26,7 @@ class FlavourInfo(DictScheme): description: Optional[str] def __repr__(self): - return f'rootfs_size: {self.rootfs_size}' + return 
f"rootfs_size: {self.rootfs_size}" class Flavour(DictScheme): @@ -30,16 +38,25 @@ class Flavour(DictScheme): @staticmethod def from_pkgbuild(pkgbuild: Pkgbuild) -> Flavour: name = pkgbuild.name - if not name.startswith('flavour-'): - raise Exception(f'Flavour package "{name}" doesn\'t start with "flavour-": "{name}"') - if name.endswith('-common'): - raise Exception(f'Flavour package "{name}" ends with "-common": "{name}"') + if not name.startswith("flavour-"): + raise Exception( + f'Flavour package "{name}" doesn\'t start with "flavour-": "{name}"' + ) + if name.endswith("-common"): + raise Exception( + f'Flavour package "{name}" ends with "-common": "{name}"' + ) name = name[8:] # split off 'flavour-' description = pkgbuild.description # cut off FLAVOUR_DESCRIPTION_PREFIX if description.lower().startswith(FLAVOUR_DESCRIPTION_PREFIX.lower()): - description = description[len(FLAVOUR_DESCRIPTION_PREFIX):] - return Flavour(name=name, pkgbuild=pkgbuild, description=description.strip(), flavour_info=None) + description = description[len(FLAVOUR_DESCRIPTION_PREFIX) :] + return Flavour( + name=name, + pkgbuild=pkgbuild, + description=description.strip(), + flavour_info=None, + ) def __repr__(self): return f'Flavour<"{self.name}": "{self.description}", package: {self.pkgbuild.name if self.pkgbuild else "??? PROBABLY A BUG!"}{f", {self.flavour_info}" if self.flavour_info else ""}>' @@ -48,16 +65,16 @@ class Flavour(DictScheme): return self.nice_str() def nice_str(self, newlines: bool = False, colors: bool = False) -> str: - separator = '\n' if newlines else ', ' + separator = "\n" if newlines else ", " - def get_lines(k, v, key_prefix=''): + def get_lines(k, v, key_prefix=""): results = [] - full_k = f'{key_prefix}.{k}' if key_prefix else k + full_k = f"{key_prefix}.{k}" if key_prefix else k if not isinstance(v, (dict, DictScheme)): - results = [f'{color_str(full_k, bold=True)}: {v}'] + results = [f"{color_str(full_k, bold=True)}: {v}"] else: for _k, _v in v.items(): - if _k.startswith('_'): + if _k.startswith("_"): continue results += get_lines(_k, _v, key_prefix=full_k) return results @@ -67,16 +84,22 @@ class Flavour(DictScheme): def parse_flavourinfo(self, lazy: bool = True): if lazy and self.flavour_info is not None: return self.flavour_info - infopath = os.path.join(config.get_path('pkgbuilds'), self.pkgbuild.path, FLAVOUR_INFO_FILE) + infopath = os.path.join( + config.get_path("pkgbuilds"), self.pkgbuild.path, FLAVOUR_INFO_FILE + ) if not os.path.exists(infopath): - raise Exception(f"Error parsing flavour info for flavour {self.name}: file doesn't exist: {infopath}") + raise Exception( + f"Error parsing flavour info for flavour {self.name}: file doesn't exist: {infopath}" + ) try: - defaults = {'description': None} - with open(infopath, 'r') as fd: + defaults = {"description": None} + with open(infopath, "r") as fd: infodict = json.load(fd) i = FlavourInfo(**(defaults | infodict)) except Exception as ex: - raise Exception(f"Error parsing {FLAVOUR_INFO_FILE} for flavour {self.name}: {ex}") + raise Exception( + f"Error parsing {FLAVOUR_INFO_FILE} for flavour {self.name}: {ex}" + ) self.flavour_info = i if i.description: self.description = i.description @@ -93,10 +116,12 @@ def get_flavours(lazy: bool = True): return _flavours_cache logging.info("Searching PKGBUILDs for flavour packages") flavours: dict[str, Flavour] = {} - pkgbuilds: dict[str, Pkgbuild] = discover_pkgbuilds(lazy=(lazy or not _flavours_discovered)) + pkgbuilds: dict[str, Pkgbuild] = discover_pkgbuilds( + lazy=(lazy or not 
_flavours_discovered)
+    )
     for pkg in pkgbuilds.values():
         name = pkg.name
-        if not name.startswith('flavour-') or name.endswith('-common'):
+        if not name.startswith("flavour-") or name.endswith("-common"):
             continue
         name = name[8:]  # split off 'flavour-'
         logging.info(f"Found flavour package {name}")
@@ -109,7 +134,7 @@ def get_flavours(lazy: bool = True):

 def get_flavour(name: str, lazy: bool = True):
     global _flavours_cache
-    pkg_name = f'flavour-{name}'
+    pkg_name = f"flavour-{name}"
     if lazy and name in _flavours_cache:
         return _flavours_cache[name]
     try:
@@ -117,7 +142,9 @@ def get_flavour(name: str, lazy: bool = True):
         init_pkgbuilds()
         pkg = get_pkgbuild_by_name(pkg_name)
     except Exception as ex:
-        raise Exception(f"Error parsing PKGBUILD for flavour package {pkg_name}:\n{ex}")
+        raise Exception(
+            f"Error parsing PKGBUILD for flavour package {pkg_name}:\n{ex}"
+        )
     assert pkg and pkg.name == pkg_name
     flavour = Flavour.from_pkgbuild(pkg)
     _flavours_cache[name] = flavour
diff --git a/src/kupferbootstrap/generator.py b/src/kupferbootstrap/generator.py
index f936ada..ccd9a7b 100644
--- a/src/kupferbootstrap/generator.py
+++ b/src/kupferbootstrap/generator.py
@@ -1,10 +1,18 @@
 from typing import Optional

-from .constants import Arch, CFLAGS_ARCHES, CFLAGS_GENERAL, COMPILE_ARCHES, GCC_HOSTSPECS
+from .constants import (
+    Arch,
+    CFLAGS_ARCHES,
+    CFLAGS_GENERAL,
+    COMPILE_ARCHES,
+    GCC_HOSTSPECS,
+)
 from .config.state import config


-def generate_makepkg_conf(arch: Arch, cross: bool = False, chroot: Optional[str] = None) -> str:
+def generate_makepkg_conf(
+    arch: Arch, cross: bool = False, chroot: Optional[str] = None
+) -> str:
     """
     Generate a makepkg.conf. For use with crosscompiling, specify `cross=True`
     and pass as `chroot` the relative path inside the native chroot where the foreign chroot will be mounted.
@@ -13,7 +21,9 @@ def generate_makepkg_conf(arch: Arch, cross: bool = False, chroot: Optional[str]
     hostspec = GCC_HOSTSPECS[config.runtime.arch if cross else arch][arch]
     cflags = CFLAGS_ARCHES[arch] + CFLAGS_GENERAL
     if cross and not chroot:
-        raise Exception('Cross-compile makepkg conf requested but no chroot path given: "{chroot}"')
+        raise Exception(
+            f'Cross-compile makepkg conf requested but no chroot path given: "{chroot}"'
+        )
     conf = f'''
 #!/hint/bash
 #
@@ -57,7 +67,7 @@ CHOST="{hostspec}"
 # -march (or -mcpu) builds exclusively for an architecture
 # -mtune optimizes for an architecture, but builds for whole processor family
 CPPFLAGS=""
-CFLAGS="{' '.join(cflags)}"
+CFLAGS="{" ".join(cflags)}"
 CXXFLAGS="$CFLAGS -Wp,-D_GLIBCXX_ASSERTIONS"
 LDFLAGS="-Wl,-O1,--sort-common,--as-needed,-z,relro,-z,now"
 #RUSTFLAGS="-C opt-level=2"
@@ -119,11 +129,11 @@ STRIP_SHARED="--strip-unneeded"
 #-- Options to be used when stripping static libraries. See `man strip' for details. 
STRIP_STATIC="--strip-debug" #-- Manual (man and info) directories to compress (if zipman is specified) -MAN_DIRS=({'{usr{,/local}{,/share},opt/*}/{man,info}'}) +MAN_DIRS=({"{usr{,/local}{,/share},opt/*}/{man,info}"}) #-- Doc directories to remove (if !docs is specified) -DOC_DIRS=({'usr/{,local/}{,share/}{doc,gtk-doc} opt/*/{doc,gtk-doc}'}) +DOC_DIRS=({"usr/{,local/}{,share/}{doc,gtk-doc} opt/*/{doc,gtk-doc}"}) #-- Files to be removed from all packages (if purge is specified) -PURGE_TARGETS=({'usr/{,share}/info/dir .packlist *.pod'}) +PURGE_TARGETS=({"usr/{,share}/info/dir .packlist *.pod"}) #-- Directory to store source code in for debug packages DBGSRCDIR="/usr/src/debug" @@ -176,9 +186,9 @@ SRCEXT='.src.tar.gz' ''' if cross: assert chroot - chroot = chroot.strip('/') - includes = f'-I/usr/{hostspec}/usr/include -I/{chroot}/usr/include' - libs = f'-L/usr/{hostspec}/lib -L/{chroot}/usr/lib' + chroot = chroot.strip("/") + includes = f"-I/usr/{hostspec}/usr/include -I/{chroot}/usr/include" + libs = f"-L/usr/{hostspec}/lib -L/{chroot}/usr/lib" conf += f''' export ARCH="{COMPILE_ARCHES[arch]}" @@ -198,8 +208,12 @@ def generate_pacman_conf_body( check_space: bool = True, in_chroot: bool = True, ): - pacman_cache = f"{config.get_path('pacman')}/{arch}" if not in_chroot else '/var/cache/pacman/pkg' - return f''' + pacman_cache = ( + f"{config.get_path('pacman')}/{arch}" + if not in_chroot + else "/var/cache/pacman/pkg" + ) + return f""" # # /etc/pacman.conf # @@ -234,7 +248,7 @@ Architecture = {arch} #UseSyslog Color #NoProgressBar -{'' if check_space else '#'}CheckSpace +{"" if check_space else "#"}CheckSpace VerbosePkgLists ParallelDownloads = {config.file.pacman.parallel_downloads} @@ -267,4 +281,4 @@ LocalFileSigLevel = Optional # uncommented to enable the repo. 
# -''' +""" diff --git a/src/kupferbootstrap/image/boot.py b/src/kupferbootstrap/image/boot.py index c36adb7..4c68883 100644 --- a/src/kupferbootstrap/image/boot.py +++ b/src/kupferbootstrap/image/boot.py @@ -5,7 +5,12 @@ import click from typing import Optional from kupferbootstrap.config.state import config -from kupferbootstrap.constants import FLASH_PARTS, FASTBOOT, JUMPDRIVE, JUMPDRIVE_VERSION +from kupferbootstrap.constants import ( + FLASH_PARTS, + FASTBOOT, + JUMPDRIVE, + JUMPDRIVE_VERSION, +) from kupferbootstrap.exec.file import makedir from kupferbootstrap.devices.device import get_profile_device from kupferbootstrap.flavours.flavour import get_profile_flavour @@ -14,26 +19,44 @@ from kupferbootstrap.version.cli import _check_kbs_version from kupferbootstrap.wrapper import enforce_wrap from .fastboot import fastboot_boot, fastboot_erase -from .image import get_device_name, losetup_rootfs_image, get_image_path, dump_aboot, dump_lk2nd +from .image import ( + get_device_name, + losetup_rootfs_image, + get_image_path, + dump_aboot, + dump_lk2nd, +) -LK2ND = FLASH_PARTS['LK2ND'] -ABOOT = FLASH_PARTS['ABOOT'] +LK2ND = FLASH_PARTS["LK2ND"] +ABOOT = FLASH_PARTS["ABOOT"] BOOT_TYPES = [ABOOT, LK2ND, JUMPDRIVE] -@click.command(name='boot') +@click.command(name="boot") @profile_option -@click.argument('type', required=False, default=ABOOT, type=click.Choice(BOOT_TYPES)) -@click.option('-b', '--sector-size', type=int, help="Override the device's sector size", default=None) +@click.argument( + "type", required=False, default=ABOOT, type=click.Choice(BOOT_TYPES) +) @click.option( - '--erase-dtbo/--no-erase-dtbo', + "-b", + "--sector-size", + type=int, + help="Override the device's sector size", + default=None, +) +@click.option( + "--erase-dtbo/--no-erase-dtbo", is_flag=True, default=True, show_default=True, help="Erase the DTBO partition before flashing", ) -@click.option('--confirm', is_flag=True, help="Ask for confirmation before executing fastboot commands") +@click.option( + "--confirm", + is_flag=True, + help="Ask for confirmation before executing fastboot commands", +) def cmd_boot( type: str, profile: Optional[str] = None, @@ -49,7 +72,9 @@ def cmd_boot( deviceinfo = device.parse_deviceinfo() sector_size = sector_size or device.get_image_sectorsize_default() if not sector_size: - raise Exception(f"Device {device.name} has no rootfs_image_sector_size specified") + raise Exception( + f"Device {device.name} has no rootfs_image_sector_size specified" + ) image_path = get_image_path(device, flavour) strategy = deviceinfo.flash_method if not strategy: @@ -57,21 +82,26 @@ def cmd_boot( if strategy == FASTBOOT: if type == JUMPDRIVE: - file = f'boot-{get_device_name(device)}.img' - path = os.path.join(config.get_path('jumpdrive'), file) + file = f"boot-{get_device_name(device)}.img" + path = os.path.join(config.get_path("jumpdrive"), file) makedir(os.path.dirname(path)) if not os.path.exists(path): - urllib.request.urlretrieve(f'https://github.com/dreemurrs-embedded/Jumpdrive/releases/download/{JUMPDRIVE_VERSION}/{file}', path) + urllib.request.urlretrieve( + f"https://github.com/dreemurrs-embedded/Jumpdrive/releases/download/{JUMPDRIVE_VERSION}/{file}", + path, + ) else: loop_device = losetup_rootfs_image(image_path, sector_size) if type == LK2ND: - path = dump_lk2nd(loop_device + 'p1') + path = dump_lk2nd(loop_device + "p1") elif type == ABOOT: - path = dump_aboot(loop_device + 'p1') + path = dump_aboot(loop_device + "p1") else: - raise Exception(f'Unknown boot image type {type}') + raise 
Exception(f"Unknown boot image type {type}") if erase_dtbo: - fastboot_erase('dtbo', confirm=confirm) + fastboot_erase("dtbo", confirm=confirm) fastboot_boot(path, confirm=confirm) else: - raise Exception(f'Unsupported flash strategy "{strategy}" for device {device.name}') + raise Exception( + f'Unsupported flash strategy "{strategy}" for device {device.name}' + ) diff --git a/src/kupferbootstrap/image/fastboot.py b/src/kupferbootstrap/image/fastboot.py index 165fcbc..f25448d 100644 --- a/src/kupferbootstrap/image/fastboot.py +++ b/src/kupferbootstrap/image/fastboot.py @@ -5,9 +5,14 @@ from kupferbootstrap.exec.cmd import run_cmd, CompletedProcess from typing import Optional -def confirm_cmd(cmd: list[str], color='green', default=True, msg='Really execute fastboot cmd?') -> bool: +def confirm_cmd( + cmd: list[str], + color="green", + default=True, + msg="Really execute fastboot cmd?", +) -> bool: return click.confirm( - f'{click.style(msg, fg=color, bold=True)} {" ".join(cmd)}', + f"{click.style(msg, fg=color, bold=True)} {' '.join(cmd)}", default=default, abort=False, ) @@ -17,12 +22,16 @@ def fastboot_erase(target: str, confirm: bool = False): if not target: raise Exception(f"No fastboot erase target specified: {repr(target)}") cmd = [ - 'fastboot', - 'erase', + "fastboot", + "erase", target, ] if confirm: - if not confirm_cmd(cmd, msg=f'Really erase fastboot "{target}" partition?', color='yellow'): + if not confirm_cmd( + cmd, + msg=f'Really erase fastboot "{target}" partition?', + color="yellow", + ): raise Exception("user aborted") logging.info(f"Fastboot: Erasing {target}") run_cmd( @@ -31,11 +40,16 @@ def fastboot_erase(target: str, confirm: bool = False): ) -def fastboot_flash(partition: str, file: str, sparse_size: Optional[str] = None, confirm: bool = False): +def fastboot_flash( + partition: str, + file: str, + sparse_size: Optional[str] = None, + confirm: bool = False, +): cmd = [ - 'fastboot', - *(['-S', sparse_size] if sparse_size is not None else []), - 'flash', + "fastboot", + *(["-S", sparse_size] if sparse_size is not None else []), + "flash", partition, file, ] @@ -46,13 +60,13 @@ def fastboot_flash(partition: str, file: str, sparse_size: Optional[str] = None, result = run_cmd(cmd) assert isinstance(result, CompletedProcess) if result.returncode != 0: - raise Exception(f'Failed to flash {file}') + raise Exception(f"Failed to flash {file}") def fastboot_boot(file, confirm: bool = False): cmd = [ - 'fastboot', - 'boot', + "fastboot", + "boot", file, ] if confirm: @@ -62,4 +76,4 @@ def fastboot_boot(file, confirm: bool = False): result = run_cmd(cmd) assert isinstance(result, CompletedProcess) if result.returncode != 0: - raise Exception(f'Failed to boot {file} using fastboot') + raise Exception(f"Failed to boot {file} using fastboot") diff --git a/src/kupferbootstrap/image/flash.py b/src/kupferbootstrap/image/flash.py index 1a7142f..a572467 100644 --- a/src/kupferbootstrap/image/flash.py +++ b/src/kupferbootstrap/image/flash.py @@ -5,7 +5,12 @@ import logging from typing import Optional -from kupferbootstrap.constants import FLASH_PARTS, LOCATIONS, FASTBOOT, JUMPDRIVE +from kupferbootstrap.constants import ( + FLASH_PARTS, + LOCATIONS, + FASTBOOT, + JUMPDRIVE, +) from kupferbootstrap.exec.cmd import run_root_cmd from kupferbootstrap.exec.file import get_temp_dir from kupferbootstrap.devices.device import get_profile_device @@ -15,43 +20,61 @@ from kupferbootstrap.version.cli import _check_kbs_version from kupferbootstrap.wrapper import enforce_wrap from .fastboot import 
fastboot_flash -from .image import dd_image, dump_aboot, dump_lk2nd, dump_qhypstub, get_image_path, losetup_destroy, losetup_rootfs_image, partprobe, shrink_fs +from .image import ( + dd_image, + dump_aboot, + dump_lk2nd, + dump_qhypstub, + get_image_path, + losetup_destroy, + losetup_rootfs_image, + partprobe, + shrink_fs, +) -ABOOT = FLASH_PARTS['ABOOT'] -LK2ND = FLASH_PARTS['LK2ND'] -QHYPSTUB = FLASH_PARTS['QHYPSTUB'] -FULL_IMG = FLASH_PARTS['FULL'] +ABOOT = FLASH_PARTS["ABOOT"] +LK2ND = FLASH_PARTS["LK2ND"] +QHYPSTUB = FLASH_PARTS["QHYPSTUB"] +FULL_IMG = FLASH_PARTS["FULL"] -DD = 'dd' +DD = "dd" FLASH_METHODS = [FASTBOOT, JUMPDRIVE, DD] def find_jumpdrive(location: str) -> str: if location not in LOCATIONS: - raise Exception(f'Invalid location {location}. Choose one of {", ".join(LOCATIONS)}') - dir = '/dev/disk/by-id' + raise Exception( + f"Invalid location {location}. Choose one of {', '.join(LOCATIONS)}" + ) + dir = "/dev/disk/by-id" for file in os.listdir(dir): - sanitized_file = file.replace('-', '').replace('_', '').lower() - if f'jumpdrive{location.split("-")[0]}' in sanitized_file: + sanitized_file = file.replace("-", "").replace("_", "").lower() + if f"jumpdrive{location.split('-')[0]}" in sanitized_file: return os.path.realpath(os.path.join(dir, file)) - raise Exception('Unable to discover Jumpdrive') + raise Exception("Unable to discover Jumpdrive") def test_blockdev(path: str): partprobe(path) - result = run_root_cmd(['lsblk', path, '-o', 'SIZE'], capture_output=True) + result = run_root_cmd(["lsblk", path, "-o", "SIZE"], capture_output=True) if result.returncode != 0: - raise Exception(f'Failed to lsblk {path}') - if result.stdout == b'SIZE\n 0B\n': - raise Exception(f'Disk {path} has a size of 0B. That probably means it is not available (e.g. no' - 'microSD inserted or no microSD card slot installed in the device) or corrupt or defect') + raise Exception(f"Failed to lsblk {path}") + if result.stdout == b"SIZE\n 0B\n": + raise Exception( + f"Disk {path} has a size of 0B. That probably means it is not available (e.g. 
no" + "microSD inserted or no microSD card slot installed in the device) or corrupt or defect" + ) def prepare_minimal_image(source_path: str, sector_size: int) -> str: minimal_image_dir = get_temp_dir(register_cleanup=True) - minimal_image_path = os.path.join(minimal_image_dir, f'minimal-{os.path.basename(source_path)}') - logging.info(f"Copying image {os.path.basename(source_path)} to {minimal_image_dir} for shrinking") + minimal_image_path = os.path.join( + minimal_image_dir, f"minimal-{os.path.basename(source_path)}" + ) + logging.info( + f"Copying image {os.path.basename(source_path)} to {minimal_image_dir} for shrinking" + ) shutil.copyfile(source_path, minimal_image_path) loop_device = losetup_rootfs_image(minimal_image_path, sector_size) @@ -61,15 +84,33 @@ def prepare_minimal_image(source_path: str, sector_size: int) -> str: return minimal_image_path -@click.command(name='flash') +@click.command(name="flash") @profile_option -@click.option('-m', '--method', type=click.Choice(FLASH_METHODS)) -@click.option('--split-size', help='Chunk size when splitting the image into sparse files via fastboot') -@click.option('--shrink/--no-shrink', is_flag=True, default=True, help="Copy and shrink the image file to minimal size") -@click.option('-b', '--sector-size', type=int, help="Override the device's sector size", default=None) -@click.option('--confirm', is_flag=True, help="Ask for confirmation before executing fastboot commands") -@click.argument('what', type=click.Choice(list(FLASH_PARTS.values()))) -@click.argument('location', type=str, required=False) +@click.option("-m", "--method", type=click.Choice(FLASH_METHODS)) +@click.option( + "--split-size", + help="Chunk size when splitting the image into sparse files via fastboot", +) +@click.option( + "--shrink/--no-shrink", + is_flag=True, + default=True, + help="Copy and shrink the image file to minimal size", +) +@click.option( + "-b", + "--sector-size", + type=int, + help="Override the device's sector size", + default=None, +) +@click.option( + "--confirm", + is_flag=True, + help="Ask for confirmation before executing fastboot commands", +) +@click.argument("what", type=click.Choice(list(FLASH_PARTS.values()))) +@click.argument("location", type=str, required=False) def cmd_flash( what: str, location: str, @@ -101,52 +142,70 @@ def cmd_flash( method = method or deviceinfo.flash_method if what not in FLASH_PARTS.values(): - raise Exception(f'Unknown what "{what}", must be one of {", ".join(FLASH_PARTS.values())}') + raise Exception( + f'Unknown what "{what}", must be one of {", ".join(FLASH_PARTS.values())}' + ) - if location and location.startswith('aboot'): - raise Exception("You're trying to flash something " - f"to your aboot partition ({location!r}), " - "which contains the android bootloader itself.\n" - "This will brick your phone and is not what you want.\n" - 'Aborting.\nDid you mean to flash to "boot"?') + if location and location.startswith("aboot"): + raise Exception( + "You're trying to flash something " + f"to your aboot partition ({location!r}), " + "which contains the android bootloader itself.\n" + "This will brick your phone and is not what you want.\n" + 'Aborting.\nDid you mean to flash to "boot"?' 
+ ) if what == FULL_IMG: - path = '' + path = "" if method not in FLASH_METHODS: raise Exception(f"Flash method {method} not supported!") if not location: - raise Exception(f'You need to specify a location to flash {what} to') - path = '' - image_path = prepare_minimal_image(device_image_path, sector_size) if shrink else device_image_path + raise Exception( + f"You need to specify a location to flash {what} to" + ) + path = "" + image_path = ( + prepare_minimal_image(device_image_path, sector_size) + if shrink + else device_image_path + ) if method == FASTBOOT: fastboot_flash( partition=location, file=image_path, - sparse_size=split_size if split_size is not None else '100M', + sparse_size=split_size if split_size is not None else "100M", confirm=confirm, ) elif method in [JUMPDRIVE, DD]: - if method == DD or location.startswith("/") or (location not in LOCATIONS and os.path.exists(location)): + if ( + method == DD + or location.startswith("/") + or (location not in LOCATIONS and os.path.exists(location)) + ): path = location elif method == JUMPDRIVE: path = find_jumpdrive(location) test_blockdev(path) if dd_image(input=image_path, output=path).returncode != 0: - raise Exception(f'Failed to flash {image_path} to {path}') + raise Exception(f"Failed to flash {image_path} to {path}") else: raise Exception(f'Unhandled flash method "{method}" for "{what}"') else: if method and method != FASTBOOT: - raise Exception(f'Flashing "{what}" with method "{method}" not supported, try no parameter or "{FASTBOOT}"') + raise Exception( + f'Flashing "{what}" with method "{method}" not supported, try no parameter or "{FASTBOOT}"' + ) loop_device = losetup_rootfs_image(device_image_path, sector_size) if what == ABOOT: - path = dump_aboot(f'{loop_device}p1') - fastboot_flash(location or 'boot', path, confirm=confirm) + path = dump_aboot(f"{loop_device}p1") + fastboot_flash(location or "boot", path, confirm=confirm) elif what == LK2ND: - path = dump_lk2nd(f'{loop_device}p1') - fastboot_flash(location or 'lk2nd', path, confirm=confirm) + path = dump_lk2nd(f"{loop_device}p1") + fastboot_flash(location or "lk2nd", path, confirm=confirm) elif what == QHYPSTUB: - path = dump_qhypstub(f'{loop_device}p1') - fastboot_flash(location or 'qhypstub', path, confirm=confirm) + path = dump_qhypstub(f"{loop_device}p1") + fastboot_flash(location or "qhypstub", path, confirm=confirm) else: - raise Exception(f'Unknown what "{what}", this must be a bug in kupferbootstrap!') + raise Exception( + f'Unknown what "{what}", this must be a bug in kupferbootstrap!' 
+ ) diff --git a/src/kupferbootstrap/image/image.py b/src/kupferbootstrap/image/image.py index dbb7150..c031f6c 100644 --- a/src/kupferbootstrap/image/image.py +++ b/src/kupferbootstrap/image/image.py @@ -11,14 +11,28 @@ from typing import Optional, Union from kupferbootstrap.config.state import config, Profile from kupferbootstrap.chroot.device import DeviceChroot, get_device_chroot -from kupferbootstrap.constants import Arch, BASE_LOCAL_PACKAGES, BASE_PACKAGES, POST_INSTALL_CMDS +from kupferbootstrap.constants import ( + Arch, + BASE_LOCAL_PACKAGES, + BASE_PACKAGES, + POST_INSTALL_CMDS, +) from kupferbootstrap.distro.distro import get_base_distro, get_kupfer_https from kupferbootstrap.devices.device import Device, get_profile_device from kupferbootstrap.exec.cmd import run_root_cmd, generate_cmd_su -from kupferbootstrap.exec.file import get_temp_dir, root_write_file, root_makedir, makedir +from kupferbootstrap.exec.file import ( + get_temp_dir, + root_write_file, + root_makedir, + makedir, +) from kupferbootstrap.flavours.flavour import Flavour, get_profile_flavour from kupferbootstrap.net.ssh import copy_ssh_keys -from kupferbootstrap.packages.build import build_enable_qemu_binfmt, build_packages, filter_pkgbuilds +from kupferbootstrap.packages.build import ( + build_enable_qemu_binfmt, + build_packages, + filter_pkgbuilds, +) from kupferbootstrap.version.cli import _check_kbs_version from kupferbootstrap.wrapper import enforce_wrap @@ -27,22 +41,22 @@ IMG_FILE_ROOT_DEFAULT_SIZE = "1800M" IMG_FILE_BOOT_DEFAULT_SIZE = "90M" -def dd_image(input: str, output: str, blocksize='1M') -> CompletedProcess: +def dd_image(input: str, output: str, blocksize="1M") -> CompletedProcess: cmd = [ - 'dd', - f'if={input}', - f'of={output}', - f'bs={blocksize}', - 'oflag=direct', - 'status=progress', - 'conv=sync,noerror', + "dd", + f"if={input}", + f"of={output}", + f"bs={blocksize}", + "oflag=direct", + "status=progress", + "conv=sync,noerror", ] - logging.debug(f'running dd cmd: {cmd}') + logging.debug(f"running dd cmd: {cmd}") return run_root_cmd(cmd) def partprobe(device: str): - return run_root_cmd(['partprobe', device]) + return run_root_cmd(["partprobe", device]) def bytes_to_sectors(b: int, sector_size: int, round_up: bool = True): @@ -53,19 +67,33 @@ def bytes_to_sectors(b: int, sector_size: int, round_up: bool = True): def get_fs_size(partition: str) -> tuple[int, int]: - blocks_cmd = run_root_cmd(['dumpe2fs', '-h', partition], env={"LC_ALL": "C"}, capture_output=True) + blocks_cmd = run_root_cmd( + ["dumpe2fs", "-h", partition], env={"LC_ALL": "C"}, capture_output=True + ) if blocks_cmd.returncode != 0: logging.debug(f"dumpe2fs stdout:\n: {blocks_cmd.stdout}") logging.debug(f"dumpe2fs stderr:\n {blocks_cmd.stderr}") - raise Exception(f'Failed to detect new filesystem size of {partition}') - blocks_text = blocks_cmd.stdout.decode('utf-8') if blocks_cmd.stdout else '' + raise Exception(f"Failed to detect new filesystem size of {partition}") + blocks_text = ( + blocks_cmd.stdout.decode("utf-8") if blocks_cmd.stdout else "" + ) try: - fs_blocks = int(re.search('\\nBlock count:[ ]+([0-9]+)\\n', blocks_text, flags=re.MULTILINE).group(1)) # type: ignore[union-attr] - fs_block_size = int(re.search('\\nBlock size:[ ]+([0-9]+)\\n', blocks_text).group(1)) # type: ignore[union-attr] + fs_blocks = int( + re.search( + "\\nBlock count:[ ]+([0-9]+)\\n", + blocks_text, + flags=re.MULTILINE, + ).group(1) + ) # type: ignore[union-attr] + fs_block_size = int( + re.search("\\nBlock size:[ ]+([0-9]+)\\n", 
blocks_text).group(1)
+        )  # type: ignore[union-attr]
     except Exception as ex:
         logging.debug(f"dumpe2fs stdout:\n {blocks_text}")
         logging.debug(f"dumpe2fs stderr:\n: {blocks_cmd.stderr}")
-        logging.info("Failed to scrape block size and count from dumpe2fs:", ex)
+        logging.info(
+            f"Failed to scrape block size and count from dumpe2fs: {ex}"
+        )
         raise ex
     return fs_blocks, fs_block_size
@@ -80,36 +108,46 @@ def align_bytes(size_bytes: int, alignment: int = 4096) -> int:
 def shrink_fs(loop_device: str, file: str, sector_size: int):
     partprobe(loop_device)
     logging.debug(f"Checking filesystem at {loop_device}p2")
-    result = run_root_cmd(['e2fsck', '-fy', f'{loop_device}p2'])
+    result = run_root_cmd(["e2fsck", "-fy", f"{loop_device}p2"])
     if result.returncode > 2:
         # https://man7.org/linux/man-pages/man8/e2fsck.8.html#EXIT_CODE
-        raise Exception(f'Failed to e2fsck {loop_device}p2 with exit code {result.returncode}')
+        raise Exception(
+            f"Failed to e2fsck {loop_device}p2 with exit code {result.returncode}"
+        )

-    logging.info(f'Shrinking filesystem at {loop_device}p2')
-    result = run_root_cmd(['resize2fs', '-M', f'{loop_device}p2'])
+    logging.info(f"Shrinking filesystem at {loop_device}p2")
+    result = run_root_cmd(["resize2fs", "-M", f"{loop_device}p2"])
     if result.returncode != 0:
-        raise Exception(f'Failed to resize2fs {loop_device}p2')
+        raise Exception(f"Failed to resize2fs {loop_device}p2")

-    logging.debug(f'Reading size of shrunken filesystem on {loop_device}p2')
-    fs_blocks, fs_block_size = get_fs_size(f'{loop_device}p2')
+    logging.debug(f"Reading size of shrunken filesystem on {loop_device}p2")
+    fs_blocks, fs_block_size = get_fs_size(f"{loop_device}p2")
     sectors = bytes_to_sectors(fs_blocks * fs_block_size, sector_size)
-    logging.info(f'Shrinking partition at {loop_device}p2 to {sectors} sectors ({sectors * sector_size} bytes)')
+    logging.info(
+        f"Shrinking partition at {loop_device}p2 to {sectors} sectors ({sectors * sector_size} bytes)"
+    )
     child_proccess = subprocess.Popen(
-        generate_cmd_su(['fdisk', '-b', str(sector_size), loop_device], switch_user='root'),  # type: ignore
+        generate_cmd_su(
+            ["fdisk", "-b", str(sector_size), loop_device], switch_user="root"
+        ),  # type: ignore
         stdin=subprocess.PIPE,
     )
-    child_proccess.stdin.write('\n'.join([  # type: ignore
-        'd',
-        '2',
-        'n',
-        'p',
-        '2',
-        '',
-        f'+{sectors}',
-        'w',
-        'q',
-    ]).encode('utf-8'))
+    child_proccess.stdin.write(
+        "\n".join(
+            [  # type: ignore
+                "d",
+                "2",
+                "n",
+                "p",
+                "2",
+                "",
+                f"+{sectors}",
+                "w",
+                "q",
+            ]
+        ).encode("utf-8")
+    )
     child_proccess.communicate()
@@ -118,42 +156,49 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
     # For some reason re-reading the partition table fails, but that is not a problem
     partprobe(loop_device)
     if returncode > 1:
-        raise Exception(f'Failed to shrink partition size of {loop_device}p2 with fdisk')
+        raise Exception(
+            f"Failed to shrink partition size of {loop_device}p2 with fdisk"
+        )

     partprobe(loop_device).check_returncode()

-    logging.debug(f'Finding end sector of partition at {loop_device}p2')
-    result = run_root_cmd(['fdisk', '-b', str(sector_size), '-l', loop_device], capture_output=True)
+    logging.debug(f"Finding end sector of partition at {loop_device}p2")
+    result = run_root_cmd(
+        ["fdisk", "-b", str(sector_size), "-l", loop_device],
+        capture_output=True,
+    )
     if result.returncode != 0:
         print(result.stdout)
         print(result.stderr)
-        raise Exception(f'Failed to fdisk -l {loop_device}')
+        raise Exception(f"Failed to fdisk -l {loop_device}")
     end_sector = 0
-    for 
line in result.stdout.decode('utf-8').split('\n'): - if line.startswith(f'{loop_device}p2'): - parts = list(filter(lambda part: part != '', line.split(' '))) + for line in result.stdout.decode("utf-8").split("\n"): + if line.startswith(f"{loop_device}p2"): + parts = list(filter(lambda part: part != "", line.split(" "))) end_sector = int(parts[2]) if end_sector == 0: - raise Exception(f'Failed to find end sector of {loop_device}p2') + raise Exception(f"Failed to find end sector of {loop_device}p2") end_size = align_bytes((end_sector + 1) * sector_size, 4096) - logging.debug(f'({end_sector} + 1) sectors * {sector_size} bytes/sector = {end_size} bytes') - logging.info(f'Truncating {file} to {end_size} bytes') - result = subprocess.run(['truncate', '-s', str(end_size), file]) + logging.debug( + f"({end_sector} + 1) sectors * {sector_size} bytes/sector = {end_size} bytes" + ) + logging.info(f"Truncating {file} to {end_size} bytes") + result = subprocess.run(["truncate", "-s", str(end_size), file]) if result.returncode != 0: - raise Exception(f'Failed to truncate {file}') + raise Exception(f"Failed to truncate {file}") partprobe(loop_device) def losetup_destroy(loop_device): - logging.debug(f'Destroying loop device {loop_device}') + logging.debug(f"Destroying loop device {loop_device}") run_root_cmd( [ - 'losetup', - '-d', + "losetup", + "-d", loop_device, ], stderr=subprocess.DEVNULL, @@ -170,44 +215,56 @@ def get_flavour_name(flavour: Union[str, Flavour]) -> str: return flavour -def get_image_name(device: Union[str, Device], flavour: Union[str, Flavour], img_type='full') -> str: - return f'{get_device_name(device)}-{get_flavour_name(flavour)}-{img_type}.img' +def get_image_name( + device: Union[str, Device], flavour: Union[str, Flavour], img_type="full" +) -> str: + return ( + f"{get_device_name(device)}-{get_flavour_name(flavour)}-{img_type}.img" + ) -def get_image_path(device: Union[str, Device], flavour: Union[str, Flavour], img_type='full') -> str: - return os.path.join(config.get_path('images'), get_image_name(device, flavour, img_type)) +def get_image_path( + device: Union[str, Device], flavour: Union[str, Flavour], img_type="full" +) -> str: + return os.path.join( + config.get_path("images"), get_image_name(device, flavour, img_type) + ) def losetup_rootfs_image(image_path: str, sector_size: int) -> str: - logging.debug(f'Creating loop device for {image_path} with sector size {sector_size}') - result = run_root_cmd([ - 'losetup', - '-f', - '-b', - str(sector_size), - '-P', - image_path, - ]) + logging.debug( + f"Creating loop device for {image_path} with sector size {sector_size}" + ) + result = run_root_cmd( + [ + "losetup", + "-f", + "-b", + str(sector_size), + "-P", + image_path, + ] + ) if result.returncode != 0: - raise Exception(f'Failed to create loop device for {image_path}') + raise Exception(f"Failed to create loop device for {image_path}") - logging.debug(f'Finding loop device for {image_path}') + logging.debug(f"Finding loop device for {image_path}") - result = subprocess.run(['losetup', '-J'], capture_output=True) + result = subprocess.run(["losetup", "-J"], capture_output=True) if result.returncode != 0: print(result.stdout) print(result.stderr) - raise Exception('Failed to list loop devices') + raise Exception("Failed to list loop devices") - data = json.loads(result.stdout.decode('utf-8')) - loop_device = '' - for d in data['loopdevices']: - if d['back-file'] == image_path: - loop_device = d['name'] + data = json.loads(result.stdout.decode("utf-8")) + loop_device = "" + 
for d in data["loopdevices"]: + if d["back-file"] == image_path: + loop_device = d["name"] break - if loop_device == '': - raise Exception(f'Failed to find loop device for {image_path}') + if loop_device == "": + raise Exception(f"Failed to find loop device for {image_path}") partprobe(loop_device) atexit.register(losetup_destroy, loop_device) @@ -216,98 +273,133 @@ def losetup_rootfs_image(image_path: str, sector_size: int) -> str: def mount_chroot(rootfs_source: str, boot_src: str, chroot: DeviceChroot): - logging.debug(f'Mounting {rootfs_source} at {chroot.path}') + logging.debug(f"Mounting {rootfs_source} at {chroot.path}") chroot.mount_rootfs(rootfs_source) - assert (os.path.ismount(chroot.path)) + assert os.path.ismount(chroot.path) - root_makedir(chroot.get_path('boot')) + root_makedir(chroot.get_path("boot")) - logging.debug(f'Mounting {boot_src} at {chroot.path}/boot') - chroot.mount(boot_src, '/boot', options=['defaults']) + logging.debug(f"Mounting {boot_src} at {chroot.path}/boot") + chroot.mount(boot_src, "/boot", options=["defaults"]) -def dump_file_from_image(image_path: str, file_path: str, target_path: Optional[str] = None): - target_path = target_path or os.path.join(get_temp_dir(), os.path.basename(file_path)) - result = run_root_cmd([ - 'debugfs', - image_path, - '-R', - f'\'dump /{file_path.lstrip("/")} {target_path}\'', - ]) +def dump_file_from_image( + image_path: str, file_path: str, target_path: Optional[str] = None +): + target_path = target_path or os.path.join( + get_temp_dir(), os.path.basename(file_path) + ) + result = run_root_cmd( + [ + "debugfs", + image_path, + "-R", + f"'dump /{file_path.lstrip('/')} {target_path}'", + ] + ) if result.returncode != 0 or not os.path.exists(target_path): - raise Exception(f'Failed to dump {file_path} from /boot') + raise Exception(f"Failed to dump {file_path} from /boot") return target_path def dump_aboot(image_path: str) -> str: - return dump_file_from_image(image_path, file_path='/aboot.img') + return dump_file_from_image(image_path, file_path="/aboot.img") def dump_lk2nd(image_path: str) -> str: """ This doesn't append the image with the appended DTB which is needed for some devices, so it should get added in the future. 
""" - return dump_file_from_image(image_path, file_path='/lk2nd.img') + return dump_file_from_image(image_path, file_path="/lk2nd.img") def dump_qhypstub(image_path: str) -> str: - return dump_file_from_image(image_path, file_path='/qhyptstub.img') + return dump_file_from_image(image_path, file_path="/qhyptstub.img") def create_img_file(image_path: str, size_str: str): - result = subprocess.run([ - 'truncate', - '-s', - size_str, - image_path, - ]) + result = subprocess.run( + [ + "truncate", + "-s", + size_str, + image_path, + ] + ) if result.returncode != 0: - raise Exception(f'Failed to allocate {image_path}') + raise Exception(f"Failed to allocate {image_path}") return image_path def partition_device(device: str): - boot_partition_size = '100MiB' - create_partition_table = ['mklabel', 'msdos'] - create_boot_partition = ['mkpart', 'primary', 'ext2', '0%', boot_partition_size] - create_root_partition = ['mkpart', 'primary', boot_partition_size, '100%'] - enable_boot = ['set', '1', 'boot', 'on'] - result = run_root_cmd([ - 'parted', - '--script', - device, - ] + create_partition_table + create_boot_partition + create_root_partition + enable_boot) + boot_partition_size = "100MiB" + create_partition_table = ["mklabel", "msdos"] + create_boot_partition = [ + "mkpart", + "primary", + "ext2", + "0%", + boot_partition_size, + ] + create_root_partition = ["mkpart", "primary", boot_partition_size, "100%"] + enable_boot = ["set", "1", "boot", "on"] + result = run_root_cmd( + [ + "parted", + "--script", + device, + ] + + create_partition_table + + create_boot_partition + + create_root_partition + + enable_boot + ) if result.returncode != 0: - raise Exception(f'Failed to create partitions on {device}') + raise Exception(f"Failed to create partitions on {device}") -def create_filesystem(device: str, blocksize: Optional[int], label=None, options=[], fstype='ext4'): +def create_filesystem( + device: str, + blocksize: Optional[int], + label=None, + options=[], + fstype="ext4", +): """Creates a new filesystem. 
Blocksize defaults""" - labels = ['-L', label] if label else [] - cmd = [f'mkfs.{fstype}', '-F', *labels] + labels = ["-L", label] if label else [] + cmd = [f"mkfs.{fstype}", "-F", *labels] if blocksize: # blocksize can be 4k max due to pagesize blocksize = min(blocksize, 4096) - if fstype.startswith('ext'): + if fstype.startswith("ext"): # blocksize for ext-fs must be >=1024 blocksize = max(blocksize, 1024) cmd += [ - '-b', + "-b", str(blocksize), ] cmd.append(device) result = run_root_cmd(cmd) if result.returncode != 0: - raise Exception(f'Failed to create {fstype} filesystem on {device} with CMD: {cmd}') + raise Exception( + f"Failed to create {fstype} filesystem on {device} with CMD: {cmd}" + ) def create_root_fs(device: str, blocksize: Optional[int]): - create_filesystem(device, blocksize=blocksize, label='kupfer_root', options=['-O', '^metadata_csum', '-N', '100000']) + create_filesystem( + device, + blocksize=blocksize, + label="kupfer_root", + options=["-O", "^metadata_csum", "-N", "100000"], + ) def create_boot_fs(device: str, blocksize: Optional[int]): - create_filesystem(device, blocksize=blocksize, label='kupfer_boot', fstype='ext2') + create_filesystem( + device, blocksize=blocksize, label="kupfer_boot", fstype="ext2" + ) def install_rootfs( @@ -320,8 +412,14 @@ def install_rootfs( use_local_repos: bool, profile: Profile, ): - user = profile['username'] or 'kupfer' - chroot = get_device_chroot(device=get_device_name(device), flavour=flavour.name, arch=arch, packages=packages, use_local_repos=use_local_repos) + user = profile["username"] or "kupfer" + chroot = get_device_chroot( + device=get_device_name(device), + flavour=flavour.name, + arch=arch, + packages=packages, + use_local_repos=use_local_repos, + ) mount_chroot(rootfs_device, bootfs_device, chroot) @@ -330,90 +428,92 @@ def install_rootfs( chroot.activate() chroot.create_user( user=user, - password=profile['password'], + password=profile["password"], + ) + chroot.add_sudo_config( + config_name="wheel", privilegee="%wheel", password_required=True ) - chroot.add_sudo_config(config_name='wheel', privilegee='%wheel', password_required=True) copy_ssh_keys( chroot, user=user, allow_fail=True, ) files = { - 'etc/pacman.conf': get_base_distro(arch).get_pacman_conf( + "etc/pacman.conf": get_base_distro(arch).get_pacman_conf( check_space=True, extra_repos=get_kupfer_https(arch).repos, in_chroot=True, ), - 'etc/hostname': profile['hostname'] or 'kupfer', + "etc/hostname": profile["hostname"] or "kupfer", } for target, content in files.items(): - root_write_file(os.path.join(chroot.path, target.lstrip('/')), content) + root_write_file(os.path.join(chroot.path, target.lstrip("/")), content) logging.info("Running post-install CMDs") for cmd in POST_INSTALL_CMDS: result = chroot.run_cmd(cmd) assert isinstance(result, subprocess.CompletedProcess) if result.returncode != 0: - raise Exception(f'Error running post-install cmd: {cmd}') + raise Exception(f"Error running post-install cmd: {cmd}") - logging.info('Preparing to unmount chroot') - res = chroot.run_cmd('sync && umount /boot', attach_tty=True) - logging.debug(f'rc: {res}') + logging.info("Preparing to unmount chroot") + res = chroot.run_cmd("sync && umount /boot", attach_tty=True) + logging.debug(f"rc: {res}") chroot.deactivate() logging.debug(f'Unmounting rootfs at "{chroot.path}"') - res = run_root_cmd(['umount', chroot.path]) + res = run_root_cmd(["umount", chroot.path]) assert isinstance(res, CompletedProcess) - logging.debug(f'rc: {res.returncode}') + logging.debug(f"rc: 
{res.returncode}") -@click.group(name='image') +@click.group(name="image") def cmd_image(): """Build, flash and boot device images""" sectorsize_option = click.option( - '-b', - '--sector-size', + "-b", + "--sector-size", help="Override the device's sector size", type=int, default=None, ) -@cmd_image.command(name='build') -@click.argument('profile_name', required=False) +@cmd_image.command(name="build") +@click.argument("profile_name", required=False) @click.option( - '--local-repos/--no-local-repos', - '-l/-L', - help='Whether to use local package repos at all or only use HTTPS repos.', + "--local-repos/--no-local-repos", + "-l/-L", + help="Whether to use local package repos at all or only use HTTPS repos.", default=True, show_default=True, is_flag=True, ) @click.option( - '--build-pkgs/--no-build-pkgs', - '-p/-P', - help='Whether to build missing/outdated local packages if local repos are enabled.', + "--build-pkgs/--no-build-pkgs", + "-p/-P", + help="Whether to build missing/outdated local packages if local repos are enabled.", default=True, show_default=True, is_flag=True, ) @click.option( - '--no-download-pkgs', - help='Disable trying to download packages instead of building if building is enabled.', + "--no-download-pkgs", + help="Disable trying to download packages instead of building if building is enabled.", default=False, is_flag=True, ) @click.option( - '--block-target', - help='Override the block device file to write the final image to', + "--block-target", + help="Override the block device file to write the final image to", type=click.Path(), default=None, ) @click.option( - '--skip-part-images', - help='Skip creating image files for the partitions and directly work on the target block device.', + "--skip-part-images", + help="Skip creating image files for the partitions and directly work on the target block device.", default=False, is_flag=True, ) @@ -442,9 +542,14 @@ def cmd_build( # check_programs_wrap(['makepkg', 'pacman', 'pacstrap']) profile: Profile = config.get_profile(profile_name) flavour = get_profile_flavour(profile_name) - rootfs_size_mb = flavour.parse_flavourinfo().rootfs_size * 1000 + int(profile.size_extra_mb) + rootfs_size_mb = flavour.parse_flavourinfo().rootfs_size * 1000 + int( + profile.size_extra_mb + ) - packages = BASE_LOCAL_PACKAGES + [device.package.name, flavour.pkgbuild.name] + packages = BASE_LOCAL_PACKAGES + [ + device.package.name, + flavour.pkgbuild.name, + ] packages_extra = BASE_PACKAGES + profile.pkgs_include if arch != config.runtime.arch: @@ -453,9 +558,20 @@ def cmd_build( if local_repos and build_pkgs: logging.info("Making sure all packages are built") # enforce that local base packages are built - pkgbuilds = set(filter_pkgbuilds(packages, arch=arch, allow_empty_results=False, use_paths=False)) + pkgbuilds = set( + filter_pkgbuilds( + packages, arch=arch, allow_empty_results=False, use_paths=False + ) + ) # extra packages might be a mix of package names that are in our PKGBUILDs and packages from the base distro - pkgbuilds |= set(filter_pkgbuilds(packages_extra, arch=arch, allow_empty_results=True, use_paths=False)) + pkgbuilds |= set( + filter_pkgbuilds( + packages_extra, + arch=arch, + allow_empty_results=True, + use_paths=False, + ) + ) build_packages(pkgbuilds, arch, try_download=not no_download_pkgs) sector_size = sector_size or device.get_image_sectorsize() @@ -464,25 +580,31 @@ def cmd_build( makedir(os.path.dirname(image_path)) - logging.info(f'Creating new file at {image_path}') + logging.info(f"Creating new file at 
{image_path}") create_img_file(image_path, f"{rootfs_size_mb}M") - loop_device = losetup_rootfs_image(image_path, sector_size or device.get_image_sectorsize_default()) + loop_device = losetup_rootfs_image( + image_path, sector_size or device.get_image_sectorsize_default() + ) partition_device(loop_device) partprobe(loop_device) boot_dev: str root_dev: str - loop_boot = loop_device + 'p1' - loop_root = loop_device + 'p2' + loop_boot = loop_device + "p1" + loop_root = loop_device + "p2" if skip_part_images: boot_dev = loop_boot root_dev = loop_root else: - logging.info('Creating per-partition image files') - boot_dev = create_img_file(get_image_path(device, flavour, 'boot'), IMG_FILE_BOOT_DEFAULT_SIZE) - root_dev = create_img_file(get_image_path(device, flavour, 'root'), f'{rootfs_size_mb - 200}M') + logging.info("Creating per-partition image files") + boot_dev = create_img_file( + get_image_path(device, flavour, "boot"), IMG_FILE_BOOT_DEFAULT_SIZE + ) + root_dev = create_img_file( + get_image_path(device, flavour, "root"), f"{rootfs_size_mb - 200}M" + ) create_boot_fs(boot_dev, sector_size) create_root_fs(root_dev, sector_size) @@ -499,20 +621,24 @@ def cmd_build( ) if not skip_part_images: - logging.info('Copying partition image files into full image:') - logging.info(f'Block-copying /boot to {image_path}') + logging.info("Copying partition image files into full image:") + logging.info(f"Block-copying /boot to {image_path}") dd_image(input=boot_dev, output=loop_boot) - logging.info(f'Block-copying rootfs to {image_path}') + logging.info(f"Block-copying rootfs to {image_path}") dd_image(input=root_dev, output=loop_root) - logging.info(f'Done! Image saved to {image_path}') + logging.info(f"Done! Image saved to {image_path}") -@cmd_image.command(name='inspect') -@click.option('--shell', '-s', is_flag=True) +@cmd_image.command(name="inspect") +@click.option("--shell", "-s", is_flag=True) @sectorsize_option -@click.argument('profile', required=False) -def cmd_inspect(profile: Optional[str] = None, shell: bool = False, sector_size: Optional[int] = None): +@click.argument("profile", required=False) +def cmd_inspect( + profile: Optional[str] = None, + shell: bool = False, + sector_size: Optional[int] = None, +): """Loop-mount the device image for inspection.""" config.enforce_profile_device_set() config.enforce_profile_flavour_set() @@ -527,17 +653,17 @@ def cmd_inspect(profile: Optional[str] = None, shell: bool = False, sector_size: image_path = get_image_path(device, flavour) loop_device = losetup_rootfs_image(image_path, sector_size) partprobe(loop_device) - mount_chroot(loop_device + 'p2', loop_device + 'p1', chroot) + mount_chroot(loop_device + "p2", loop_device + "p1", chroot) - logging.info(f'Inspect the rootfs image at {chroot.path}') + logging.info(f"Inspect the rootfs image at {chroot.path}") if shell: chroot.initialized = True chroot.activate() if arch != config.runtime.arch: - logging.info('Installing requisites for foreign-arch shell') + logging.info("Installing requisites for foreign-arch shell") build_enable_qemu_binfmt(arch) - logging.info('Starting inspection shell') - chroot.run_cmd('/bin/bash') + logging.info("Starting inspection shell") + chroot.run_cmd("/bin/bash") else: pause() diff --git a/src/kupferbootstrap/logger.py b/src/kupferbootstrap/logger.py index 63f94b1..3aa1116 100644 --- a/src/kupferbootstrap/logger.py +++ b/src/kupferbootstrap/logger.py @@ -6,14 +6,28 @@ import sys from typing import Optional -def setup_logging(verbose: bool, quiet: bool = False, force_colors: 
Optional[bool] = None, log_setup: bool = True): - level_colors = coloredlogs.DEFAULT_LEVEL_STYLES | {'info': {'color': 'magenta', 'bright': True}, 'debug': {'color': 'blue', 'bright': True}} - field_colors = coloredlogs.DEFAULT_FIELD_STYLES | {'asctime': {'color': 'white', 'faint': True}} - level = logging.DEBUG if verbose and not quiet else (logging.INFO if not quiet else logging.ERROR) +def setup_logging( + verbose: bool, + quiet: bool = False, + force_colors: Optional[bool] = None, + log_setup: bool = True, +): + level_colors = coloredlogs.DEFAULT_LEVEL_STYLES | { + "info": {"color": "magenta", "bright": True}, + "debug": {"color": "blue", "bright": True}, + } + field_colors = coloredlogs.DEFAULT_FIELD_STYLES | { + "asctime": {"color": "white", "faint": True} + } + level = ( + logging.DEBUG + if verbose and not quiet + else (logging.INFO if not quiet else logging.ERROR) + ) coloredlogs.install( stream=sys.stdout, - fmt='%(asctime)s %(levelname)s: %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', + fmt="%(asctime)s %(levelname)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", level=level, level_styles=level_colors, field_styles=field_colors, @@ -22,28 +36,30 @@ def setup_logging(verbose: bool, quiet: bool = False, force_colors: Optional[boo # don't raise Exceptions when e.g. output stream is closed logging.raiseExceptions = False if log_setup: - logging.debug('Logger: Logging set up.') + logging.debug("Logger: Logging set up.") if force_colors is not None: - logging.debug(f'Logger: Force-{"en" if force_colors else "dis"}abled colors') + logging.debug( + f"Logger: Force-{'en' if force_colors else 'dis'}abled colors" + ) verbose_option = click.option( - '-v', - '--verbose', + "-v", + "--verbose", is_flag=True, - help='Enables verbose logging', + help="Enables verbose logging", ) quiet_option = click.option( - '-q', - '--quiet', + "-q", + "--quiet", is_flag=True, - help='Disable most logging, only log errors. (Currently only affects KBS logging, not called subprograms)', + help="Disable most logging, only log errors. (Currently only affects KBS logging, not called subprograms)", ) color_option = click.option( - '--force-colors/--no-colors', + "--force-colors/--no-colors", is_flag=True, default=None, - help='Force enable/disable log coloring. Defaults to autodetection.', + help="Force enable/disable log coloring. 
Defaults to autodetection.", ) diff --git a/src/kupferbootstrap/main.py b/src/kupferbootstrap/main.py index 28d4ff8..3011a10 100755 --- a/src/kupferbootstrap/main.py +++ b/src/kupferbootstrap/main.py @@ -7,7 +7,13 @@ from os import isatty from traceback import format_exc, format_exception_only, format_tb from typing import Optional -from .logger import color_option, logging, quiet_option, setup_logging, verbose_option +from .logger import ( + color_option, + logging, + quiet_option, + setup_logging, + verbose_option, +) from .wrapper import get_wrapper_type, enforce_wrap, nowrapper_option from .progressbar import progress_bars_option @@ -24,7 +30,14 @@ from .version.cli import cmd_version @click.group() -@click.option('--error-shell', '-E', 'error_shell', is_flag=True, default=False, help='Spawn shell after error occurs') +@click.option( + "--error-shell", + "-E", + "error_shell", + is_flag=True, + default=False, + help="Spawn shell after error occurs", +) @verbose_option @quiet_option @config_option @@ -49,33 +62,39 @@ def cli( config.runtime.error_shell = error_shell config.try_load_file(config_file) if config.file_state.exception: - logging.warning(f"Config file couldn't be loaded: {config.file_state.exception}") + logging.warning( + f"Config file couldn't be loaded: {config.file_state.exception}" + ) if wrapper_override: - logging.info(f'Force-wrapping in wrapper-type: "{get_wrapper_type()}"!') + logging.info( + f'Force-wrapping in wrapper-type: "{get_wrapper_type()}"!' + ) enforce_wrap() def main(): try: - return cli(prog_name='kupferbootstrap') + return cli(prog_name="kupferbootstrap") except Exception as ex: if config.runtime.verbose: msg = format_exc() else: - tb_start = ''.join(format_tb(ex.__traceback__, limit=1)).strip('\n') - tb_end = ''.join(format_tb(ex.__traceback__, limit=-1)).strip('\n') + tb_start = "".join(format_tb(ex.__traceback__, limit=1)).strip( + "\n" + ) + tb_end = "".join(format_tb(ex.__traceback__, limit=-1)).strip("\n") short_tb = [ - 'Traceback (most recent call last):', + "Traceback (most recent call last):", tb_start, - '[...]', + "[...]", tb_end, format_exception_only(ex)[-1], # type: ignore[arg-type] ] - msg = '\n'.join(short_tb) - logging.fatal('\n' + msg) + msg = "\n".join(short_tb) + logging.fatal("\n" + msg) if config.runtime.error_shell: - logging.info('Starting error shell. Type exit to quit.') - subprocess.call('/bin/bash') + logging.info("Starting error shell. 
Type exit to quit.") + subprocess.call("/bin/bash") exit(1) @@ -90,5 +109,5 @@ cli.add_command(cmd_net) cli.add_command(cmd_packages) cli.add_command(cmd_version) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/src/kupferbootstrap/net/cli.py b/src/kupferbootstrap/net/cli.py index 656c083..c2be9e6 100644 --- a/src/kupferbootstrap/net/cli.py +++ b/src/kupferbootstrap/net/cli.py @@ -4,6 +4,6 @@ from .forwarding import cmd_forwarding from .ssh import cmd_ssh from .telnet import cmd_telnet -cmd_net = click.Group('net', help='Network utilities like ssh and telnet') +cmd_net = click.Group("net", help="Network utilities like ssh and telnet") for cmd in cmd_forwarding, cmd_ssh, cmd_telnet: cmd_net.add_command(cmd) diff --git a/src/kupferbootstrap/net/forwarding.py b/src/kupferbootstrap/net/forwarding.py index 3dc4171..f9be6c0 100644 --- a/src/kupferbootstrap/net/forwarding.py +++ b/src/kupferbootstrap/net/forwarding.py @@ -7,45 +7,53 @@ from kupferbootstrap.wrapper import check_programs_wrap from .ssh import run_ssh_command -@click.command(name='forwarding') +@click.command(name="forwarding") def cmd_forwarding(): """Enable network forwarding for a usb-attached device""" - check_programs_wrap(['syctl', 'iptables']) + check_programs_wrap(["syctl", "iptables"]) logging.info("Enabling ipv4 forwarding with sysctl") - result = run_root_cmd([ - 'sysctl', - 'net.ipv4.ip_forward=1', - ]) + result = run_root_cmd( + [ + "sysctl", + "net.ipv4.ip_forward=1", + ] + ) if result.returncode != 0: - click.Abort('Failed to enable ipv4 forward via sysctl') + click.Abort("Failed to enable ipv4 forward via sysctl") logging.info("Enabling ipv4 forwarding with iptables") - result = run_root_cmd([ - 'iptables', - '-P', - 'FORWARD', - 'ACCEPT', - ]) + result = run_root_cmd( + [ + "iptables", + "-P", + "FORWARD", + "ACCEPT", + ] + ) if result.returncode != 0: - click.Abort('Failed set iptables rule') + click.Abort("Failed set iptables rule") logging.info("Enabling ipv4 NATting with iptables") - result = run_root_cmd([ - 'iptables', - '-A', - 'POSTROUTING', - '-t', - 'nat', - '-j', - 'MASQUERADE', - '-s', - '172.16.42.0/24', - ]) + result = run_root_cmd( + [ + "iptables", + "-A", + "POSTROUTING", + "-t", + "nat", + "-j", + "MASQUERADE", + "-s", + "172.16.42.0/24", + ] + ) if result.returncode != 0: - click.Abort('Failed set iptables rule') + click.Abort("Failed set iptables rule") logging.info("Setting default route on device via ssh") - result = run_ssh_command(cmd=['sudo -S route add default gw 172.16.42.2'], alloc_tty=True) + result = run_ssh_command( + cmd=["sudo -S route add default gw 172.16.42.2"], alloc_tty=True + ) if result.returncode != 0: - click.Abort('Failed to add gateway over ssh') + click.Abort("Failed to add gateway over ssh") diff --git a/src/kupferbootstrap/net/ssh.py b/src/kupferbootstrap/net/ssh.py index 82501b0..99ee3c6 100644 --- a/src/kupferbootstrap/net/ssh.py +++ b/src/kupferbootstrap/net/ssh.py @@ -5,110 +5,143 @@ import pathlib import click from kupferbootstrap.config.state import config -from kupferbootstrap.constants import SSH_COMMON_OPTIONS, SSH_DEFAULT_HOST, SSH_DEFAULT_PORT +from kupferbootstrap.constants import ( + SSH_COMMON_OPTIONS, + SSH_DEFAULT_HOST, + SSH_DEFAULT_PORT, +) from kupferbootstrap.exec.cmd import run_cmd from kupferbootstrap.exec.file import write_file from kupferbootstrap.chroot.abstract import Chroot from kupferbootstrap.wrapper import check_programs_wrap -@click.command(name='ssh') -@click.argument('cmd', nargs=-1) -@click.option('--user', 
'-u', help='the SSH username', default=None) -@click.option('--host', '-h', help='the SSH host', default=SSH_DEFAULT_HOST) -@click.option('--port', '-p', help='the SSH port', type=int, default=SSH_DEFAULT_PORT) +@click.command(name="ssh") +@click.argument("cmd", nargs=-1) +@click.option("--user", "-u", help="the SSH username", default=None) +@click.option("--host", "-h", help="the SSH host", default=SSH_DEFAULT_HOST) +@click.option( + "--port", "-p", help="the SSH port", type=int, default=SSH_DEFAULT_PORT +) def cmd_ssh(cmd: list[str], user: str, host: str, port: int): """Establish SSH connection to device""" run_ssh_command(list(cmd), user=user, host=host, port=port, alloc_tty=True) -def run_ssh_command(cmd: list[str] = [], - user: Optional[str] = None, - host: str = SSH_DEFAULT_HOST, - port: int = SSH_DEFAULT_PORT, - alloc_tty: bool = True): - check_programs_wrap(['ssh']) +def run_ssh_command( + cmd: list[str] = [], + user: Optional[str] = None, + host: str = SSH_DEFAULT_HOST, + port: int = SSH_DEFAULT_PORT, + alloc_tty: bool = True, +): + check_programs_wrap(["ssh"]) if not user: - user = config.get_profile()['username'] + user = config.get_profile()["username"] keys = find_ssh_keys() extra_args = [] if len(keys) > 0: - extra_args += ['-i', keys[0]] + extra_args += ["-i", keys[0]] if config.runtime.verbose: - extra_args += ['-v'] + extra_args += ["-v"] if alloc_tty: - extra_args += ['-t'] - hoststr = f'{(user + "@") if user else ""}{host}' - logging.info(f'Opening SSH connection to {hoststr} ({port})') + extra_args += ["-t"] + hoststr = f"{(user + '@') if user else ''}{host}" + logging.info(f"Opening SSH connection to {hoststr} ({port})") logging.debug(f"ssh: trying to run {cmd} on {hoststr}") - full_cmd = [ - 'ssh', - ] + extra_args + SSH_COMMON_OPTIONS + [ - '-p', - str(port), - hoststr, - '--', - ] + cmd + full_cmd = ( + [ + "ssh", + ] + + extra_args + + SSH_COMMON_OPTIONS + + [ + "-p", + str(port), + hoststr, + "--", + ] + + cmd + ) logging.debug(f"running cmd: {full_cmd}") return run_cmd(full_cmd) -def scp_put_files(src: list[str], dst: str, user: Optional[str] = None, host: str = SSH_DEFAULT_HOST, port: int = SSH_DEFAULT_PORT): - check_programs_wrap(['scp']) +def scp_put_files( + src: list[str], + dst: str, + user: Optional[str] = None, + host: str = SSH_DEFAULT_HOST, + port: int = SSH_DEFAULT_PORT, +): + check_programs_wrap(["scp"]) if not user: - user = config.get_profile()['username'] + user = config.get_profile()["username"] keys = find_ssh_keys() key_args = [] if len(keys) > 0: - key_args = ['-i', keys[0]] - cmd = [ - 'scp', - ] + key_args + SSH_COMMON_OPTIONS + [ - '-P', - str(port), - ] + src + [ - f'{user}@{host}:{dst}', - ] + key_args = ["-i", keys[0]] + cmd = ( + [ + "scp", + ] + + key_args + + SSH_COMMON_OPTIONS + + [ + "-P", + str(port), + ] + + src + + [ + f"{user}@{host}:{dst}", + ] + ) logging.info(f"Copying files to {user}@{host}:{dst}:\n{src}") logging.debug(f"running cmd: {cmd}") return run_cmd(cmd) def find_ssh_keys(): - dir = os.path.join(pathlib.Path.home(), '.ssh') + dir = os.path.join(pathlib.Path.home(), ".ssh") if not os.path.exists(dir): return [] keys = [] for file in os.listdir(dir): - if file.startswith('id_') and not file.endswith('.pub'): + if file.startswith("id_") and not file.endswith(".pub"): keys.append(os.path.join(dir, file)) return keys def copy_ssh_keys(chroot: Chroot, user: str, allow_fail: bool = False): - check_programs_wrap(['ssh-keygen']) - ssh_dir_relative = os.path.join('/home', user, '.ssh') + check_programs_wrap(["ssh-keygen"]) + 
ssh_dir_relative = os.path.join("/home", user, ".ssh") ssh_dir = chroot.get_path(ssh_dir_relative) - authorized_keys_file_rel = os.path.join(ssh_dir_relative, 'authorized_keys') + authorized_keys_file_rel = os.path.join( + ssh_dir_relative, "authorized_keys" + ) authorized_keys_file = chroot.get_path(authorized_keys_file_rel) keys = find_ssh_keys() if len(keys) == 0: logging.warning("Could not find any ssh key to copy") - create = click.confirm("Do you want me to generate an ssh key for you?", True) + create = click.confirm( + "Do you want me to generate an ssh key for you?", True + ) if not create: return - result = run_cmd([ - 'ssh-keygen', - '-f', - os.path.join(pathlib.Path.home(), '.ssh', 'id_ed25519_kupfer'), - '-t', - 'ed25519', - '-C', - 'kupfer', - '-N', - '', - ]) + result = run_cmd( + [ + "ssh-keygen", + "-f", + os.path.join(pathlib.Path.home(), ".ssh", "id_ed25519_kupfer"), + "-t", + "ed25519", + "-C", + "kupfer", + "-N", + "", + ] + ) if result.returncode != 0: # type: ignore logging.fatal("Failed to generate ssh key") keys = find_ssh_keys() @@ -119,12 +152,12 @@ def copy_ssh_keys(chroot: Chroot, user: str, allow_fail: bool = False): auth_key_lines = [] for key in keys: - pub = f'{key}.pub' + pub = f"{key}.pub" if not os.path.exists(pub): - logging.debug(f'Skipping key {key}: {pub} not found') + logging.debug(f"Skipping key {key}: {pub} not found") continue try: - with open(pub, 'r') as file: + with open(pub, "r") as file: contents = file.read() if not contents.strip(): continue @@ -134,13 +167,25 @@ def copy_ssh_keys(chroot: Chroot, user: str, allow_fail: bool = False): continue if not os.path.exists(ssh_dir): - logging.info(f"Creating {ssh_dir_relative!r} dir in chroot {chroot.path!r}") - chroot.run_cmd(["mkdir", "-p", "-m", "700", ssh_dir_relative], switch_user=user) + logging.info( + f"Creating {ssh_dir_relative!r} dir in chroot {chroot.path!r}" + ) + chroot.run_cmd( + ["mkdir", "-p", "-m", "700", ssh_dir_relative], switch_user=user + ) logging.info(f"Writing SSH pub keys to {authorized_keys_file}") try: - write_file(authorized_keys_file, "\n".join(auth_key_lines), user=str(chroot.get_uid(user)), mode="644") + write_file( + authorized_keys_file, + "\n".join(auth_key_lines), + user=str(chroot.get_uid(user)), + mode="644", + ) except Exception as ex: - logging.error(f"Failed to write SSH authorized_keys_file at {authorized_keys_file!r}:", exc_info=ex) + logging.error( + f"Failed to write SSH authorized_keys_file at {authorized_keys_file!r}:", + exc_info=ex, + ) if allow_fail: return raise ex from ex diff --git a/src/kupferbootstrap/net/telnet.py b/src/kupferbootstrap/net/telnet.py index 17e8985..415c486 100644 --- a/src/kupferbootstrap/net/telnet.py +++ b/src/kupferbootstrap/net/telnet.py @@ -3,11 +3,13 @@ import click from kupferbootstrap.wrapper import check_programs_wrap -@click.command(name='telnet') -def cmd_telnet(hostname: str = '172.16.42.1'): +@click.command(name="telnet") +def cmd_telnet(hostname: str = "172.16.42.1"): """Establish Telnet connection to device (e.g in debug-initramfs)""" - check_programs_wrap('telnet') - subprocess.run([ - 'telnet', - hostname, - ]) + check_programs_wrap("telnet") + subprocess.run( + [ + "telnet", + hostname, + ] + ) diff --git a/src/kupferbootstrap/packages/build.py b/src/kupferbootstrap/packages/build.py index 4274abf..2ed1af1 100644 --- a/src/kupferbootstrap/packages/build.py +++ b/src/kupferbootstrap/packages/build.py @@ -10,26 +10,44 @@ from urllib.error import HTTPError from typing import Iterable, Iterator, Optional from 
kupferbootstrap.binfmt.binfmt import binfmt_is_registered, binfmt_register -from kupferbootstrap.constants import CROSSDIRECT_PKGS, QEMU_BINFMT_PKGS, GCC_HOSTSPECS, ARCHES, Arch, CHROOT_PATHS, MAKEPKG_CMD +from kupferbootstrap.constants import ( + CROSSDIRECT_PKGS, + QEMU_BINFMT_PKGS, + GCC_HOSTSPECS, + ARCHES, + Arch, + CHROOT_PATHS, + MAKEPKG_CMD, +) from kupferbootstrap.config.state import config from kupferbootstrap.exec.cmd import run_cmd, run_root_cmd from kupferbootstrap.exec.file import makedir, remove_file, symlink from kupferbootstrap.chroot.build import get_build_chroot, BuildChroot -from kupferbootstrap.distro.distro import get_kupfer_https, get_kupfer_local, get_kupfer_repo_names +from kupferbootstrap.distro.distro import ( + get_kupfer_https, + get_kupfer_local, + get_kupfer_repo_names, +) from kupferbootstrap.distro.package import RemotePackage, LocalPackage from kupferbootstrap.distro.repo import LocalRepo from kupferbootstrap.progressbar import BAR_PADDING, get_levels_bar from kupferbootstrap.wrapper import check_programs_wrap, is_wrapped from kupferbootstrap.utils import ellipsize, sha256sum -from .pkgbuild import discover_pkgbuilds, filter_pkgbuilds, Pkgbase, Pkgbuild, SubPkgbuild +from .pkgbuild import ( + discover_pkgbuilds, + filter_pkgbuilds, + Pkgbase, + Pkgbuild, + SubPkgbuild, +) pacman_cmd = [ - 'pacman', - '-Syuu', - '--noconfirm', - '--overwrite=*', - '--needed', + "pacman", + "-Syuu", + "--noconfirm", + "--overwrite=*", + "--needed", ] @@ -38,15 +56,15 @@ def get_makepkg_env(arch: Optional[Arch] = None): threads = config.file.build.threads or multiprocessing.cpu_count() # env = {key: val for key, val in os.environ.items() if not key.split('_', maxsplit=1)[0] in ['CI', 'GITLAB', 'FF']} env = { - 'LANG': 'C', - 'CARGO_BUILD_JOBS': str(threads), - 'MAKEFLAGS': f"-j{threads}", - 'PATH': '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', + "LANG": "C", + "CARGO_BUILD_JOBS": str(threads), + "MAKEFLAGS": f"-j{threads}", + "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", } native = config.runtime.arch assert native if arch and arch != native: - env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][arch]}'} + env |= {"QEMU_LD_PREFIX": f"/usr/{GCC_HOSTSPECS[native][arch]}"} return env @@ -55,18 +73,20 @@ def init_local_repo(repo: str, arch: Arch): if not os.path.exists(repo_dir): logging.info(f'Creating local repo "{repo}" ({arch})') makedir(repo_dir) - for ext in ['db', 'files']: - filename_stripped = f'{repo}.{ext}' - filename = f'{filename_stripped}.tar.xz' + for ext in ["db", "files"]: + filename_stripped = f"{repo}.{ext}" + filename = f"{filename_stripped}.tar.xz" if not os.path.exists(os.path.join(repo_dir, filename)): - logging.info(f'Initialising local repo {f"{ext} " if ext != "db" else ""}db for repo "{repo}" ({arch})') + logging.info( + f'Initialising local repo {f"{ext} " if ext != "db" else ""}db for repo "{repo}" ({arch})' + ) result = run_cmd( [ - 'tar', - '-czf', + "tar", + "-czf", filename, - '-T', - '/dev/null', + "-T", + "/dev/null", ], cwd=os.path.join(repo_dir), ) @@ -82,13 +102,15 @@ def init_local_repo(repo: str, arch: Arch): def init_prebuilts(arch: Arch): """Ensure that all `constants.REPOSITORIES` inside `dir` exist""" - prebuilts_dir = config.get_path('packages') + prebuilts_dir = config.get_path("packages") makedir(prebuilts_dir) for repo in get_kupfer_repo_names(local=True): init_local_repo(repo, arch) -def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Iterable[Pkgbuild]) -> 
list[set[Pkgbuild]]: +def generate_dependency_chain( + package_repo: dict[str, Pkgbuild], to_build: Iterable[Pkgbuild] +) -> list[set[Pkgbuild]]: """ This figures out all dependencies and their sub-dependencies for the selection and adds those packages to the selection. First the top-level packages get selected by searching the paths. @@ -109,7 +131,9 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera result[pkg] = i return result - def get_dependencies(package: Pkgbuild, package_repo: dict[str, Pkgbuild] = package_repo) -> Iterator[Pkgbuild]: + def get_dependencies( + package: Pkgbuild, package_repo: dict[str, Pkgbuild] = package_repo + ) -> Iterator[Pkgbuild]: for dep_name in package.depends: if dep_name in visited_names: continue @@ -118,21 +142,25 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera visit(dep_pkg) yield dep_pkg - def get_recursive_dependencies(package: Pkgbuild, package_repo: dict[str, Pkgbuild] = package_repo) -> Iterator[Pkgbuild]: + def get_recursive_dependencies( + package: Pkgbuild, package_repo: dict[str, Pkgbuild] = package_repo + ) -> Iterator[Pkgbuild]: for pkg in get_dependencies(package, package_repo): yield pkg for sub_pkg in get_recursive_dependencies(pkg, package_repo): yield sub_pkg - logging.debug('Generating dependency chain:') + logging.debug("Generating dependency chain:") # init level 0 for package in to_build: visit(package) dep_levels[0].add(package) - logging.debug(f'Adding requested package {package.name}') + logging.debug(f"Adding requested package {package.name}") # add dependencies of our requested builds to level 0 for dep_pkg in get_recursive_dependencies(package): - logging.debug(f"Adding {package.name}'s dependency {dep_pkg.name} to level 0") + logging.debug( + f"Adding {package.name}'s dependency {dep_pkg.name} to level 0" + ) dep_levels[0].add(dep_pkg) visit(dep_pkg) """ @@ -148,9 +176,11 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera while dep_levels[level]: level_copy = dep_levels[level].copy() modified = False - logging.debug(f'Scanning dependency level {level}') + logging.debug(f"Scanning dependency level {level}") if level > 100: - raise Exception('Dependency chain reached 100 levels depth, this is probably a bug. Aborting!') + raise Exception( + "Dependency chain reached 100 levels depth, this is probably a bug. Aborting!" 
+ ) for pkg in level_copy: pkg_done = False @@ -164,12 +194,14 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera if pkg_done: break if not issubclass(type(other_pkg), Pkgbuild): - raise Exception('Not a Pkgbuild object:' + repr(other_pkg)) + raise Exception("Not a Pkgbuild object:" + repr(other_pkg)) for dep_name in other_pkg.depends: if dep_name in pkg.names(): dep_levels[level].remove(pkg) dep_levels[level + 1].add(pkg) - logging.debug(f'Moving {pkg.name} to level {level+1} because {other_pkg.name} depends on it as {dep_name}') + logging.debug( + f"Moving {pkg.name} to level {level + 1} because {other_pkg.name} depends on it as {dep_name}" + ) modified = True pkg_done = True break @@ -178,7 +210,9 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera continue elif dep_name in package_repo: dep_pkg = package_repo[dep_name] - logging.debug(f"Adding {pkg.name}'s dependency {dep_name} to level {level}") + logging.debug( + f"Adding {pkg.name}'s dependency {dep_name} to level {level}" + ) dep_levels[level].add(dep_pkg) visit(dep_pkg) modified = True @@ -188,7 +222,9 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera else: repeat_count = 0 if repeat_count > 10: - raise Exception(f'Probable dependency cycle detected: Level has been passed on unmodifed multiple times: #{level}: {_last_level}') + raise Exception( + f"Probable dependency cycle detected: Level has been passed on unmodifed multiple times: #{level}: {_last_level}" + ) _last_level = dep_levels[level].copy() if not modified: # if the level was modified, make another pass. level += 1 @@ -197,16 +233,18 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera return list([lvl for lvl in dep_levels[::-1] if lvl]) -def add_file_to_repo(file_path: str, repo_name: str, arch: Arch, remove_original: bool = True): - check_programs_wrap(['repo-add']) +def add_file_to_repo( + file_path: str, repo_name: str, arch: Arch, remove_original: bool = True +): + check_programs_wrap(["repo-add"]) repo_dir = os.path.join(config.get_package_dir(arch), repo_name) - pacman_cache_dir = os.path.join(config.get_path('pacman'), arch) + pacman_cache_dir = os.path.join(config.get_path("pacman"), arch) file_name = os.path.basename(file_path) target_file = os.path.join(repo_dir, file_name) init_local_repo(repo_name, arch) if file_path != target_file: - logging.debug(f'moving {file_path} to {target_file} ({repo_dir})') + logging.debug(f"moving {file_path} to {target_file} ({repo_dir})") shutil.copy( file_path, repo_dir, @@ -220,62 +258,70 @@ def add_file_to_repo(file_path: str, repo_name: str, arch: Arch, remove_original logging.debug(f"Removing cached package file {cache_file}") remove_file(cache_file) cmd = [ - 'repo-add', - '--remove', + "repo-add", + "--remove", os.path.join( repo_dir, - f'{repo_name}.db.tar.xz', + f"{repo_name}.db.tar.xz", ), target_file, ] - logging.debug(f'repo: running cmd: {cmd}') + logging.debug(f"repo: running cmd: {cmd}") result = run_cmd(cmd, stderr=sys.stdout) assert isinstance(result, subprocess.CompletedProcess) if result.returncode != 0: - raise Exception(f'Failed add package {target_file} to repo {repo_name}') - for ext in ['db', 'files']: - old = os.path.join(repo_dir, f'{repo_name}.{ext}.tar.xz.old') + raise Exception( + f"Failed add package {target_file} to repo {repo_name}" + ) + for ext in ["db", "files"]: + old = os.path.join(repo_dir, f"{repo_name}.{ext}.tar.xz.old") if os.path.exists(old): remove_file(old) 
 
 
 def strip_compression_extension(filename: str):
-    for ext in ['zst', 'xz', 'gz', 'bz2']:
-        if filename.endswith(f'.pkg.tar.{ext}'):
-            return filename[:-(len(ext) + 1)]
+    for ext in ["zst", "xz", "gz", "bz2"]:
+        if filename.endswith(f".pkg.tar.{ext}"):
+            return filename[: -(len(ext) + 1)]
     logging.debug(f"file {filename} matches no known package extension")
     return filename
 
 
 def add_package_to_repo(package: Pkgbuild, arch: Arch):
-    logging.info(f'Adding {package.path} to repo {package.repo}')
-    pkgbuild_dir = os.path.join(config.get_path('pkgbuilds'), package.path)
+    logging.info(f"Adding {package.path} to repo {package.repo}")
+    pkgbuild_dir = os.path.join(config.get_path("pkgbuilds"), package.path)
 
     files = []
     for file in os.listdir(pkgbuild_dir):
         # Forced extension by makepkg.conf
-        pkgext = '.pkg.tar'
+        pkgext = ".pkg.tar"
         if pkgext not in file:
             continue
         stripped_name = strip_compression_extension(file)
         if not stripped_name.endswith(pkgext):
             continue
 
-        repo_file = os.path.join(config.get_package_dir(arch), package.repo, file)
+        repo_file = os.path.join(
+            config.get_package_dir(arch), package.repo, file
+        )
         files.append(repo_file)
         add_file_to_repo(os.path.join(pkgbuild_dir, file), package.repo, arch)
 
         # copy any-arch packages to other repos as well
-        if stripped_name.endswith(f'-any{pkgext}'):
+        if stripped_name.endswith(f"-any{pkgext}"):
             for repo_arch in ARCHES:
                 if repo_arch == arch:
                     continue  # done already
-                add_file_to_repo(repo_file, package.repo, repo_arch, remove_original=False)
+                add_file_to_repo(
+                    repo_file, package.repo, repo_arch, remove_original=False
+                )
     return files
 
 
-def try_download_package(dest_file_path: str, package: Pkgbuild, arch: Arch) -> Optional[str]:
+def try_download_package(
+    dest_file_path: str, package: Pkgbuild, arch: Arch
+) -> Optional[str]:
     filename = os.path.basename(dest_file_path)
     logging.debug(f"checking if we can download {filename}")
     pkgname = package.name
@@ -286,30 +332,50 @@ def try_download_package(dest_file_path: str, package: Pkgbuild, arch: Arch) ->
         return None
     repo = repos[repo_name]
     if pkgname not in repo.packages:
-        logging.warning(f"Package {pkgname} not found in remote repos, building instead.")
+        logging.warning(
+            f"Package {pkgname} not found in remote repos, building instead."
+        )
         return None
     repo_pkg: RemotePackage = repo.packages[pkgname]
     if repo_pkg.version != package.version:
-        logging.debug(f"Package {pkgname} versions differ: local: {package.version}, "
-                      f"remote: {repo_pkg.version}. Building instead.")
+        logging.debug(
+            f"Package {pkgname} versions differ: local: {package.version}, "
+            f"remote: {repo_pkg.version}. Building instead."
+        )
         return None
     if repo_pkg.filename != filename:
         versions_str = f"local: {filename}, remote: {repo_pkg.filename}"
-        if strip_compression_extension(repo_pkg.filename) != strip_compression_extension(filename):
+        if strip_compression_extension(
+            repo_pkg.filename
+        ) != strip_compression_extension(filename):
             logging.debug(f"package filenames don't match: {versions_str}")
             return None
-        logging.debug(f"ignoring compression extension difference: {versions_str}")
-    cache_file = os.path.join(config.get_path('pacman'), arch, repo_pkg.filename)
+        logging.debug(
+            f"ignoring compression extension difference: {versions_str}"
+        )
+    cache_file = os.path.join(
+        config.get_path("pacman"), arch, repo_pkg.filename
+    )
     if os.path.exists(cache_file):
-        if not repo_pkg._desc or 'SHA256SUM' not in repo_pkg._desc:
+        if not repo_pkg._desc or "SHA256SUM" not in repo_pkg._desc:
             cache_matches = False
             extra_msg = ". However, we can't validate it, as the https repo doesnt provide a SHA256SUM for it."
         else:
-            cache_matches = sha256sum(cache_file) == repo_pkg._desc['SHA256SUM']
-            extra_msg = (". However its checksum doesn't match." if not cache_matches else " and its checksum matches.")
-        logging.debug(f"While checking the HTTPS repo DB, we found a matching filename in the pacman cache{extra_msg}")
+            cache_matches = (
+                sha256sum(cache_file) == repo_pkg._desc["SHA256SUM"]
+            )
+            extra_msg = (
+                ". However its checksum doesn't match."
+                if not cache_matches
+                else " and its checksum matches."
+            )
+        logging.debug(
+            f"While checking the HTTPS repo DB, we found a matching filename in the pacman cache{extra_msg}"
+        )
         if cache_matches:
-            logging.info(f'copying cache file {cache_file} to repo as verified by remote checksum')
+            logging.info(
+                f"copying cache file {cache_file} to repo as verified by remote checksum"
+            )
             shutil.copy(cache_file, dest_file_path)
             remove_file(cache_file)
             return dest_file_path
@@ -321,9 +387,13 @@ def try_download_package(dest_file_path: str, package: Pkgbuild, arch: Arch) ->
             return path
     except HTTPError as e:
         if e.code == 404:
-            logging.debug(f"remote package {filename} missing on server: {url}")
+            logging.debug(
+                f"remote package {filename} missing on server: {url}"
+            )
         else:
-            logging.error(f"remote package {filename} failed to download ({e.code}): {url}: {e}")
+            logging.error(
+                f"remote package {filename} failed to download ({e.code}): {url}: {e}"
+            )
         return None
 
 
@@ -333,7 +403,9 @@ def check_package_version_built(
     try_download: bool = False,
     refresh_sources: bool = False,
 ) -> bool:
-    logging.info(f"Checking if {package.name} is built for architecture {arch}")
+    logging.info(
+        f"Checking if {package.name} is built for architecture {arch}"
+    )
     if refresh_sources:
         setup_sources(package)
 
@@ -342,11 +414,13 @@ def check_package_version_built(
     filename = package.get_filename(arch)
     filename_stripped = strip_compression_extension(filename)
     local_repo: Optional[LocalRepo] = None
-    if not filename_stripped.endswith('.pkg.tar'):
-        raise Exception(f'{package.name}: stripped filename has unknown extension. {filename}')
-    logging.debug(f'Checking if {filename_stripped} is built')
+    if not filename_stripped.endswith(".pkg.tar"):
+        raise Exception(
+            f"{package.name}: stripped filename has unknown extension. {filename}"
+        )
+    logging.debug(f"Checking if {filename_stripped} is built")
 
-    any_arch = filename_stripped.endswith('any.pkg.tar')
+    any_arch = filename_stripped.endswith("any.pkg.tar")
     if any_arch:
         logging.debug(f"{package.name}: any-arch pkg detected")
 
@@ -363,36 +437,56 @@ def check_package_version_built(
             raise Exception(f"Package '{package.name}' not found")
         binpkg: LocalPackage = local_repo.packages[package.name]
         if package.version != binpkg.version:
-            raise Exception(f"Versions differ: PKGBUILD: {package.version}, Repo: {binpkg.version}")
-        if binpkg.arch not in (['any'] if package.arches == ['any'] else [arch]):
-            raise Exception(f"Wrong Architecture: {binpkg.arch}, requested: {arch}")
+            raise Exception(
+                f"Versions differ: PKGBUILD: {package.version}, Repo: {binpkg.version}"
+            )
+        if binpkg.arch not in (
+            ["any"] if package.arches == ["any"] else [arch]
+        ):
+            raise Exception(
+                f"Wrong Architecture: {binpkg.arch}, requested: {arch}"
+            )
         assert binpkg.resolved_url
-        filepath = binpkg.resolved_url.split('file://')[1]
+        filepath = binpkg.resolved_url.split("file://")[1]
         if filename_stripped != strip_compression_extension(binpkg.filename):
-            raise Exception(f"Repo entry exists but the filename {binpkg.filename} doesn't match expected {filename_stripped}")
+            raise Exception(
+                f"Repo entry exists but the filename {binpkg.filename} doesn't match expected {filename_stripped}"
+            )
        if not os.path.exists(filepath):
-            raise Exception(f"Repo entry exists but file {filepath} is missing from disk")
+            raise Exception(
+                f"Repo entry exists but file {filepath} is missing from disk"
+            )
         assert binpkg._desc
-        if 'SHA256SUM' not in binpkg._desc or not binpkg._desc['SHA256SUM']:
+        if "SHA256SUM" not in binpkg._desc or not binpkg._desc["SHA256SUM"]:
             raise Exception("Repo entry exists but has no checksum")
-        if sha256sum(filepath) != binpkg._desc['SHA256SUM']:
+        if sha256sum(filepath) != binpkg._desc["SHA256SUM"]:
             raise Exception("Repo entry exists but checksum doesn't match")
         missing = False
         file = filepath
         filename = binpkg.filename
-        logging.debug(f"{filename} found in {package.repo}.db ({arch}) and checksum matches")
+        logging.debug(
+            f"{filename} found in {package.repo}.db ({arch}) and checksum matches"
+        )
     except Exception as ex:
-        logging.debug(f"Failed to search local repos for package {package.name}: {ex}")
+        logging.debug(
+            f"Failed to search local repos for package {package.name}: {ex}"
+        )
 
     # file might be in repo directory but not in DB or checksum mismatch
-    for ext in ['xz', 'zst']:
+    for ext in ["xz", "zst"]:
         if not missing:
             break
-        file = os.path.join(config.get_package_dir(arch), package.repo, f'{filename_stripped}.{ext}')
+        file = os.path.join(
+            config.get_package_dir(arch),
+            package.repo,
+            f"{filename_stripped}.{ext}",
+        )
         if not os.path.exists(file):
             # look for 'any' arch packages in other repos
             if any_arch:
-                target_repo_file = os.path.join(config.get_package_dir(arch), package.repo, filename)
+                target_repo_file = os.path.join(
+                    config.get_package_dir(arch), package.repo, filename
+                )
                 if os.path.exists(target_repo_file):
                     file = target_repo_file
                     missing = False
@@ -401,9 +495,15 @@ def check_package_version_built(
                     for repo_arch in ARCHES:
                         if repo_arch == arch:
                             continue  # we already checked that
-                        other_repo_file = os.path.join(config.get_package_dir(repo_arch), package.repo, filename)
+                        other_repo_file = os.path.join(
+                            config.get_package_dir(repo_arch),
+                            package.repo,
+                            filename,
+                        )
                         if os.path.exists(other_repo_file):
-                            logging.info(f"package {file} found in {repo_arch} repo, copying to {arch}")
+                            logging.info(
+                                f"package {file} found in {repo_arch} repo, copying to {arch}"
+                            )
                             file = other_repo_file
                             missing = False
 
    if try_download and missing:
@@ -412,10 +512,14 @@ def check_package_version_built(
             file = downloaded
             filename = os.path.basename(file)
             missing = False
-            logging.info(f"Successfully downloaded {filename} from HTTPS mirror")
+            logging.info(
+                f"Successfully downloaded {filename} from HTTPS mirror"
+            )
     if os.path.exists(file):
         missing = False
-        add_file_to_repo(file, repo_name=package.repo, arch=arch, remove_original=False)
+        add_file_to_repo(
+            file, repo_name=package.repo, arch=arch, remove_original=False
+        )
         assert local_repo
         local_repo.scan()
     # copy arch=(any) packages to all arches
@@ -424,11 +528,19 @@ def check_package_version_built(
         for repo_arch in ARCHES:
             if repo_arch == arch:
                 continue  # we already have that
-            copy_target = os.path.join(config.get_package_dir(repo_arch), package.repo, filename)
+            copy_target = os.path.join(
+                config.get_package_dir(repo_arch), package.repo, filename
+            )
             if not os.path.exists(copy_target):
-                logging.info(f"copying any-arch package {package.name} to {repo_arch} repo: {copy_target}")
-                add_file_to_repo(file, package.repo, repo_arch, remove_original=False)
-                other_repo = get_kupfer_local(repo_arch, in_chroot=False, scan=False).repos.get(package.repo, None)
+                logging.info(
+                    f"copying any-arch package {package.name} to {repo_arch} repo: {copy_target}"
+                )
+                add_file_to_repo(
+                    file, package.repo, repo_arch, remove_original=False
+                )
+                other_repo = get_kupfer_local(
+                    repo_arch, in_chroot=False, scan=False
+                ).repos.get(package.repo, None)
                 if other_repo and other_repo.scanned:
                     other_repo.scan()
     return not missing
@@ -443,11 +555,13 @@ def setup_build_chroot(
 ) -> BuildChroot:
     assert config.runtime.arch
     if arch != config.runtime.arch:
-        build_enable_qemu_binfmt(arch, repo=repo or discover_pkgbuilds(), lazy=False)
+        build_enable_qemu_binfmt(
+            arch, repo=repo or discover_pkgbuilds(), lazy=False
+        )
         init_prebuilts(arch)
     chroot = get_build_chroot(arch, add_kupfer_repos=add_kupfer_repos)
     chroot.mount_packages()
-    logging.debug(f'Initializing {arch} build chroot')
+    logging.debug(f"Initializing {arch} build chroot")
     chroot.initialize(reset=clean_chroot)
     chroot.write_pacman_conf()  # in case it was initialized with different repos
     chroot.activate()
@@ -456,14 +570,18 @@ def setup_build_chroot(
     if extra_packages:
         chroot.try_install_packages(extra_packages, allow_fail=False)
     assert config.runtime.uid is not None
-    chroot.create_user('kupfer', password='12345678', uid=config.runtime.uid, non_unique=True)
-    if not os.path.exists(chroot.get_path('/etc/sudoers.d/kupfer_nopw')):
-        chroot.add_sudo_config('kupfer_nopw', 'kupfer', password_required=False)
+    chroot.create_user(
+        "kupfer", password="12345678", uid=config.runtime.uid, non_unique=True
+    )
+    if not os.path.exists(chroot.get_path("/etc/sudoers.d/kupfer_nopw")):
+        chroot.add_sudo_config(
+            "kupfer_nopw", "kupfer", password_required=False
+        )
 
     return chroot
 
 
-def setup_git_insecure_paths(chroot: BuildChroot, username: str = 'kupfer'):
+def setup_git_insecure_paths(chroot: BuildChroot, username: str = "kupfer"):
     chroot.run_cmd(
         ["git", "config", "--global", "--add", "safe.directory", "'*'"],
         switch_user=username,
@@ -479,28 +597,34 @@ def setup_sources(package: Pkgbuild, lazy: bool = True):
         logging.info(f"{package.path}: Sources already set up.")
         return
     makepkg_setup = MAKEPKG_CMD + [
-        '--nodeps',
-        '--nobuild',
-        '--noprepare',
-        '--skippgpcheck',
+        "--nodeps",
+        "--nobuild",
+        "--noprepare",
+        "--skippgpcheck",
    ]
 
-    logging.info(f'{package.path}: Getting build chroot for source setup')
+    logging.info(f"{package.path}: Getting build chroot for source setup")
     # we need to use a chroot here because makepkg symlinks sources into src/ via an absolute path
-    dir = os.path.join(CHROOT_PATHS['pkgbuilds'], package.path)
+    dir = os.path.join(CHROOT_PATHS["pkgbuilds"], package.path)
     assert config.runtime.arch
     chroot = setup_build_chroot(config.runtime.arch)
-    logging.info(f'{package.path}: Setting up sources with makepkg')
-    result = chroot.run_cmd(makepkg_setup, cwd=dir, switch_user='kupfer', stderr=sys.stdout)
+    logging.info(f"{package.path}: Setting up sources with makepkg")
+    result = chroot.run_cmd(
+        makepkg_setup, cwd=dir, switch_user="kupfer", stderr=sys.stdout
+    )
     assert isinstance(result, subprocess.CompletedProcess)
     if result.returncode != 0:
-        raise Exception(f'{package.path}: Failed to setup sources, exit code: {result.returncode}')
+        raise Exception(
+            f"{package.path}: Failed to setup sources, exit code: {result.returncode}"
+        )
     cache.refresh_all(write=True)
     cache.write_src_initialised()
     old_version = package.version
     package.refresh_sources()
     if package.version != old_version:
-        logging.info(f"{package.path}: version refreshed from {old_version} to {package.version}")
+        logging.info(
+            f"{package.path}: version refreshed from {old_version} to {package.version}"
+        )
 
 
 def build_package(
@@ -511,12 +635,12 @@ def build_package(
     enable_crossdirect: bool = True,
     enable_ccache: bool = True,
     clean_chroot: bool = False,
-    build_user: str = 'kupfer',
+    build_user: str = "kupfer",
     repo: Optional[dict[str, Pkgbuild]] = None,
 ):
-    makepkg_compile_opts = ['--holdver']
-    makepkg_conf_path = 'etc/makepkg.conf'
-    repo_dir = repo_dir if repo_dir else config.get_path('pkgbuilds')
+    makepkg_compile_opts = ["--holdver"]
+    makepkg_conf_path = "etc/makepkg.conf"
+    repo_dir = repo_dir if repo_dir else config.get_path("pkgbuilds")
     foreign_arch = config.runtime.arch != arch
     deps = list(package.makedepends)
     names = set(package.names())
@@ -525,8 +649,10 @@ def build_package(
     if not package.nodeps:
         deps += list(package.depends)
         deps = list(set(deps) - names)
-    needs_rust = 'rust' in deps
-    logging.info(f"{package.path}: Preparing to build: getting native arch build chroot")
+    needs_rust = "rust" in deps
+    logging.info(
+        f"{package.path}: Preparing to build: getting native arch build chroot"
+    )
     build_root: BuildChroot
     target_chroot = setup_build_chroot(
         arch=arch,
@@ -537,78 +663,125 @@ def build_package(
     assert config.runtime.arch
     native_chroot = target_chroot
     if foreign_arch:
-        logging.info(f"{package.path}: Preparing to build: getting {arch} build chroot")
+        logging.info(
+            f"{package.path}: Preparing to build: getting {arch} build chroot"
+        )
         native_chroot = setup_build_chroot(
             arch=config.runtime.arch,
-            extra_packages=['base-devel'] + CROSSDIRECT_PKGS,
+            extra_packages=["base-devel"] + CROSSDIRECT_PKGS,
             clean_chroot=clean_chroot,
             repo=repo,
         )
     if not package.mode:
-        logging.warning(f'Package {package.path} has no _mode set, assuming "host"')
-    cross = foreign_arch and package.mode == 'cross' and enable_crosscompile
+        logging.warning(
+            f'Package {package.path} has no _mode set, assuming "host"'
+        )
+    cross = foreign_arch and package.mode == "cross" and enable_crosscompile
 
     if cross:
-        logging.info(f'Cross-compiling {package.path}')
+        logging.info(f"Cross-compiling {package.path}")
         build_root = native_chroot
-        makepkg_compile_opts += ['--nodeps']
+        makepkg_compile_opts += ["--nodeps"]
         env = deepcopy(get_makepkg_env(arch))
         if enable_ccache:
-            env['PATH'] = f"/usr/lib/ccache:{env['PATH']}"
+            env["PATH"] = f"/usr/lib/ccache:{env['PATH']}"
             native_chroot.mount_ccache(user=build_user)
-        logging.info(f'{package.path}: Setting up dependencies for cross-compilation')
+        logging.info(
+            f"{package.path}: Setting up dependencies for cross-compilation"
+        )
         # include crossdirect for ccache symlinks and qemu-user
-        cross_deps = list(package.makedepends) if package.nodeps else (deps + CROSSDIRECT_PKGS + [f"{GCC_HOSTSPECS[native_chroot.arch][arch]}-gcc"])
+        cross_deps = (
+            list(package.makedepends)
+            if package.nodeps
+            else (
+                deps
+                + CROSSDIRECT_PKGS
+                + [f"{GCC_HOSTSPECS[native_chroot.arch][arch]}-gcc"]
+            )
+        )
         results = native_chroot.try_install_packages(cross_deps)
         if not package.nodeps:
-            res_crossdirect = results['crossdirect']
+            res_crossdirect = results["crossdirect"]
             assert isinstance(res_crossdirect, subprocess.CompletedProcess)
             if res_crossdirect.returncode != 0:
-                raise Exception('Unable to install crossdirect')
+                raise Exception("Unable to install crossdirect")
         # mount foreign arch chroot inside native chroot
-        chroot_relative = os.path.join(CHROOT_PATHS['chroots'], target_chroot.name)
-        makepkg_path_absolute = native_chroot.write_makepkg_conf(target_arch=arch, cross_chroot_relative=chroot_relative, cross=True)
-        makepkg_conf_path = os.path.join('etc', os.path.basename(makepkg_path_absolute))
+        chroot_relative = os.path.join(
+            CHROOT_PATHS["chroots"], target_chroot.name
+        )
+        makepkg_path_absolute = native_chroot.write_makepkg_conf(
+            target_arch=arch, cross_chroot_relative=chroot_relative, cross=True
+        )
+        makepkg_conf_path = os.path.join(
+            "etc", os.path.basename(makepkg_path_absolute)
+        )
         native_chroot.mount_crosscompile(target_chroot)
     else:
-        logging.info(f'Host-compiling {package.path}')
+        logging.info(f"Host-compiling {package.path}")
         build_root = target_chroot
-        makepkg_compile_opts += ['--nodeps' if package.nodeps else '--syncdeps']
+        makepkg_compile_opts += [
+            "--nodeps" if package.nodeps else "--syncdeps"
+        ]
         env = deepcopy(get_makepkg_env(arch))
-        if foreign_arch and package.crossdirect and enable_crossdirect and package.name not in CROSSDIRECT_PKGS:
-            env['PATH'] = f"/native/usr/lib/crossdirect/{arch}:{env['PATH']}"
+        if (
+            foreign_arch
+            and package.crossdirect
+            and enable_crossdirect
+            and package.name not in CROSSDIRECT_PKGS
+        ):
+            env["PATH"] = f"/native/usr/lib/crossdirect/{arch}:{env['PATH']}"
             target_chroot.mount_crossdirect(native_chroot)
         else:
            if enable_ccache:
-                logging.debug('ccache enabled')
-                env['PATH'] = f"/usr/lib/ccache:{env['PATH']}"
-                deps += ['ccache']
-            logging.debug(('Building for native arch. ' if not foreign_arch else '') + 'Skipping crossdirect.')
+                logging.debug("ccache enabled")
+                env["PATH"] = f"/usr/lib/ccache:{env['PATH']}"
+                deps += ["ccache"]
+            logging.debug(
+                ("Building for native arch. " if not foreign_arch else "")
+                + "Skipping crossdirect."
+ ) if not package.nodeps: - dep_install = target_chroot.try_install_packages(deps, allow_fail=False) - failed_deps = [name for name, res in dep_install.items() if res.returncode != 0] # type: ignore[union-attr] + dep_install = target_chroot.try_install_packages( + deps, allow_fail=False + ) + failed_deps = [ + name + for name, res in dep_install.items() + if res.returncode != 0 + ] # type: ignore[union-attr] if failed_deps: - raise Exception(f'{package.path}: Dependencies failed to install: {failed_deps}') + raise Exception( + f"{package.path}: Dependencies failed to install: {failed_deps}" + ) if enable_ccache: build_root.mount_ccache(user=build_user) if needs_rust: build_root.mount_rust(user=build_user) setup_git_insecure_paths(build_root) - makepkg_conf_absolute = os.path.join('/', makepkg_conf_path) + makepkg_conf_absolute = os.path.join("/", makepkg_conf_path) - build_cmd = ['source', '/etc/profile', '&&', *MAKEPKG_CMD, '--config', makepkg_conf_absolute, '--skippgpcheck', *makepkg_compile_opts] - logging.debug(f'Building: Running {build_cmd}') + build_cmd = [ + "source", + "/etc/profile", + "&&", + *MAKEPKG_CMD, + "--config", + makepkg_conf_absolute, + "--skippgpcheck", + *makepkg_compile_opts, + ] + logging.debug(f"Building: Running {build_cmd}") result = build_root.run_cmd( build_cmd, inner_env=env, - cwd=os.path.join(CHROOT_PATHS['pkgbuilds'], package.path), + cwd=os.path.join(CHROOT_PATHS["pkgbuilds"], package.path), switch_user=build_user, stderr=sys.stdout, ) assert isinstance(result, subprocess.CompletedProcess) if result.returncode != 0: - raise Exception(f'Failed to compile package {package.path}') + raise Exception(f"Failed to compile package {package.path}") def get_dependants( @@ -621,8 +794,10 @@ def get_dependants( to_add = set[Pkgbuild]() for pkg in repo.values(): if set.intersection(names, set(pkg.depends)): - if not set([arch, 'any']).intersection(pkg.arches): - logging.warn(f'get_dependants: skipping matched pkg {pkg.name} due to wrong arch: {pkg.arches}') + if not set([arch, "any"]).intersection(pkg.arches): + logging.warn( + f"get_dependants: skipping matched pkg {pkg.name} due to wrong arch: {pkg.arches}" + ) continue to_add.add(pkg) if recursive and to_add: @@ -631,11 +806,14 @@ def get_dependants( def get_pkg_names_str(pkgs: Iterable[Pkgbuild]) -> str: - return ', '.join(x.name for x in pkgs) + return ", ".join(x.name for x in pkgs) def get_pkg_levels_str(pkg_levels: Iterable[Iterable[Pkgbuild]]): - return '\n'.join(f'{i}: {get_pkg_names_str(level)}' for i, level in enumerate(pkg_levels)) + return "\n".join( + f"{i}: {get_pkg_names_str(level)}" + for i, level in enumerate(pkg_levels) + ) def get_unbuilt_package_levels( @@ -651,22 +829,28 @@ def get_unbuilt_package_levels( dependants = set[Pkgbuild]() if rebuild_dependants: dependants = get_dependants(repo, packages, arch=arch) - package_levels = generate_dependency_chain(repo, set(packages).union(dependants)) + package_levels = generate_dependency_chain( + repo, set(packages).union(dependants) + ) build_names = set[str]() build_levels = list[set[Pkgbuild]]() - includes_dependants = " (includes dependants)" if rebuild_dependants else "" - logging.info(f"Checking for unbuilt packages ({arch}) in dependency order{includes_dependants}:\n{get_pkg_levels_str(package_levels)}") + includes_dependants = ( + " (includes dependants)" if rebuild_dependants else "" + ) + logging.info( + f"Checking for unbuilt packages ({arch}) in dependency order{includes_dependants}:\n{get_pkg_levels_str(package_levels)}" + ) i = 0 
     total_levels = len(package_levels)
     package_bar = get_levels_bar(
         total=sum([len(lev) for lev in package_levels]),
         desc=f"Checking pkgs ({arch})",
-        unit='pkgs',
+        unit="pkgs",
         fields={"levels_total": total_levels},
         enable_rate=False,
     )
-    counter_built = package_bar.add_subcounter('green')
-    counter_unbuilt = package_bar.add_subcounter('blue')
+    counter_built = package_bar.add_subcounter("green")
+    counter_unbuilt = package_bar.add_subcounter("blue")
     for level_num, level_packages in enumerate(package_levels):
         level_num = level_num + 1
         package_bar.update(0, name=" " * BAR_PADDING, level=level_num)
@@ -674,27 +858,46 @@
         if not level_packages:
             continue
 
-        def add_to_level(pkg, level, reason=''):
+        def add_to_level(pkg, level, reason=""):
             if reason:
-                reason = f': {reason}'
+                reason = f": {reason}"
             counter_unbuilt.update(force=True)
-            logging.info(f"Level {level}/{total_levels} ({arch}): Adding {package.path}{reason}")
+            logging.info(
+                f"Level {level}/{total_levels} ({arch}): Adding {package.path}{reason}"
+            )
             level.add(package)
             build_names.update(package.names())
 
         for package in level_packages:
-            package_bar.update(0, force=True, name=ellipsize(package.name, padding=" ", length=BAR_PADDING))
-            if (force and package in packages):
-                add_to_level(package, level, 'query match and force=True')
+            package_bar.update(
+                0,
+                force=True,
+                name=ellipsize(package.name, padding=" ", length=BAR_PADDING),
+            )
+            if force and package in packages:
+                add_to_level(package, level, "query match and force=True")
             elif rebuild_dependants and package in dependants:
-                add_to_level(package, level, 'package is a dependant, dependant-rebuilds requested')
-            elif not check_package_version_built(package, arch, try_download=try_download, refresh_sources=refresh_sources):
-                add_to_level(package, level, 'package unbuilt')
+                add_to_level(
+                    package,
+                    level,
+                    "package is a dependant, dependant-rebuilds requested",
+                )
+            elif not check_package_version_built(
+                package,
+                arch,
+                try_download=try_download,
+                refresh_sources=refresh_sources,
+            ):
+                add_to_level(package, level, "package unbuilt")
             else:
-                logging.info(f"Level {level_num}/{total_levels} ({arch}): {package.path}: Package doesn't need [re]building")
+                logging.info(
+                    f"Level {level_num}/{total_levels} ({arch}): {package.path}: Package doesn't need [re]building"
+                )
                 counter_built.update(force=True)
 
-        logging.debug(f'Finished checking level {level_num}/{total_levels} ({arch}). Adding unbuilt pkgs: {get_pkg_names_str(level)}')
+        logging.debug(
+            f"Finished checking level {level_num}/{total_levels} ({arch}). Adding unbuilt pkgs: {get_pkg_names_str(level)}"
+        )
         if level:
             build_levels.append(level)
             i += 1
@@ -714,7 +917,7 @@
     enable_ccache: bool = True,
     clean_chroot: bool = False,
 ):
-    check_programs_wrap(['makepkg', 'pacman', 'pacstrap'])
+    check_programs_wrap(["makepkg", "pacman", "pacstrap"])
     init_prebuilts(arch)
     build_levels = get_unbuilt_package_levels(
         packages,
@@ -726,16 +929,16 @@ def build_packages(
     )
 
     if not build_levels:
-        logging.info('Everything built already')
+        logging.info("Everything built already")
         return
 
     logging.info(f"Build plan made:\n{get_pkg_levels_str(build_levels)}")
 
     total_levels = len(build_levels)
     package_bar = get_levels_bar(
-        desc=f'Building pkgs ({arch})',
-        color='purple',
-        unit='pkgs',
+        desc=f"Building pkgs ({arch})",
+        color="purple",
+        unit="pkgs",
         total=sum([len(lev) for lev in build_levels]),
         fields={"levels_total": total_levels},
         enable_rate=False,
@@ -745,14 +948,27 @@ def build_packages(
     package_bar.update(-1)
     for level, need_build in enumerate(build_levels):
         level = level + 1
-        package_bar.update(incr=0, force=True, name=" " * BAR_PADDING, level=level)
-        logging.info(f"(Level {level}/{total_levels}) Building {get_pkg_names_str(need_build)}")
+        package_bar.update(
+            incr=0, force=True, name=" " * BAR_PADDING, level=level
+        )
+        logging.info(
+            f"(Level {level}/{total_levels}) Building {get_pkg_names_str(need_build)}"
+        )
         for package in need_build:
-            package_bar.update(force=True, name=ellipsize(package.name, padding=" ", length=BAR_PADDING))
-            base = package.pkgbase if isinstance(package, SubPkgbuild) else package
+            package_bar.update(
+                force=True,
+                name=ellipsize(package.name, padding=" ", length=BAR_PADDING),
+            )
+            base = (
+                package.pkgbase
+                if isinstance(package, SubPkgbuild)
+                else package
+            )
             assert isinstance(base, Pkgbase)
             if package.is_built(arch):
-                logging.info(f"Skipping building {package.name} since it was already built this run as part of pkgbase {base.name}")
+                logging.info(
+                    f"Skipping building {package.name} since it was already built this run as part of pkgbase {base.name}"
+                )
                 continue
             build_package(
                 package,
@@ -765,7 +981,7 @@ def build_packages(
             )
             files += add_package_to_repo(package, arch)
             updated_repos.add(package.repo)
-            for _arch in ['any', arch]:
+            for _arch in ["any", arch]:
                 if _arch in base.arches:
                     base._built_for.add(_arch)
             package_bar.update()
@@ -794,11 +1010,13 @@ def build_packages_by_paths(
     if isinstance(paths, str):
         paths = [paths]
 
-    check_programs_wrap(['makepkg', 'pacman', 'pacstrap'])
+    check_programs_wrap(["makepkg", "pacman", "pacstrap"])
     assert config.runtime.arch
     for _arch in set([arch, config.runtime.arch]):
         init_prebuilts(_arch)
-    packages = filter_pkgbuilds(paths, arch=arch, repo=repo, allow_empty_results=False)
+    packages = filter_pkgbuilds(
+        paths, arch=arch, repo=repo, allow_empty_results=False
+    )
     return build_packages(
         packages,
         arch,
@@ -816,13 +1034,20 @@ def build_packages_by_paths(
 
 _qemu_enabled: dict[Arch, bool] = {arch: False for arch in ARCHES}
 
 
-def build_enable_qemu_binfmt(arch: Arch, repo: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, native_chroot: Optional[BuildChroot] = None):
+def build_enable_qemu_binfmt(
+    arch: Arch,
+    repo: Optional[dict[str, Pkgbuild]] = None,
+    lazy: bool = True,
+    native_chroot: Optional[BuildChroot] = None,
+):
     """
     Build and enable qemu-user-static, binfmt and crossdirect
     Specify lazy=False to force building the packages.
     """
     if arch not in ARCHES:
-        raise Exception(f'Unknown binfmt architecture "{arch}". Choices: {", ".join(ARCHES)}')
+        raise Exception(
+            f'Unknown binfmt architecture "{arch}". Choices: {", ".join(ARCHES)}'
+        )
     if _qemu_enabled[arch] or (lazy and binfmt_is_registered(arch)):
         if not _qemu_enabled[arch]:
             logging.info(f"qemu binfmt for {arch} was already enabled!")
@@ -833,20 +1058,24 @@ def build_enable_qemu_binfmt(arch: Arch, repo: Optional[dict[str, Pkgbuild]] = N
         _qemu_enabled[arch] = True
         logging.warning("Not enabling binfmt for host architecture!")
         return
-    logging.info('Installing qemu-user (building if necessary)')
-    check_programs_wrap(['pacman', 'makepkg', 'pacstrap'])
+    logging.info("Installing qemu-user (building if necessary)")
+    check_programs_wrap(["pacman", "makepkg", "pacstrap"])
     # build qemu-user, binfmt, crossdirect
     packages = list(CROSSDIRECT_PKGS)
     hostspec = GCC_HOSTSPECS[arch][arch]
     cross_gcc = f"{hostspec}-gcc"
     if repo:
         for pkg in repo.values():
-            if (pkg.name == cross_gcc or cross_gcc in pkg.provides):
+            if pkg.name == cross_gcc or cross_gcc in pkg.provides:
                 if config.runtime.arch not in pkg.arches:
-                    logging.debug(f"Package {pkg.path} matches {cross_gcc=} name but not arch: {pkg.arches=}")
+                    logging.debug(
+                        f"Package {pkg.path} matches {cross_gcc=} name but not arch: {pkg.arches=}"
+                    )
                     continue
                 packages.append(pkg.path)
-                logging.debug(f"Adding gcc package {pkg.path} to the necessary crosscompilation tools")
+                logging.debug(
+                    f"Adding gcc package {pkg.path} to the necessary crosscompilation tools"
+                )
                 break
     build_packages_by_paths(
         packages,
@@ -857,19 +1086,33 @@ def build_enable_qemu_binfmt(arch: Arch, repo: Optional[dict[str, Pkgbuild]] = N
         enable_crossdirect=False,
         enable_ccache=False,
     )
-    crossrepo = get_kupfer_local(native, in_chroot=False, scan=True).repos['cross'].packages
-    pkgfiles = [os.path.join(crossrepo[pkg].resolved_url.split('file://')[1]) for pkg in QEMU_BINFMT_PKGS]  # type: ignore
+    crossrepo = (
+        get_kupfer_local(native, in_chroot=False, scan=True)
+        .repos["cross"]
+        .packages
+    )
+    pkgfiles = [
+        os.path.join(crossrepo[pkg].resolved_url.split("file://")[1])
+        for pkg in QEMU_BINFMT_PKGS
+    ]  # type: ignore
     runcmd = run_root_cmd
     if native_chroot or not is_wrapped():
         native_chroot = native_chroot or setup_build_chroot(native)
         runcmd = native_chroot.run_cmd
-        hostdir = config.get_path('packages')
+        hostdir = config.get_path("packages")
         _files = []
         # convert host paths to in-chroot paths
         for p in pkgfiles:
             assert p.startswith(hostdir)
-            _files.append(os.path.join(CHROOT_PATHS['packages'], p[len(hostdir):].lstrip('/')))
+            _files.append(
+                os.path.join(
+                    CHROOT_PATHS["packages"], p[len(hostdir) :].lstrip("/")
+                )
+            )
         pkgfiles = _files
-    runcmd(['pacman', '-U', '--noconfirm', '--needed'] + pkgfiles, stderr=sys.stdout)
+    runcmd(
+        ["pacman", "-U", "--noconfirm", "--needed"] + pkgfiles,
+        stderr=sys.stdout,
+    )
     binfmt_register(arch, chroot=native_chroot)
     _qemu_enabled[arch] = True
diff --git a/src/kupferbootstrap/packages/cli.py b/src/kupferbootstrap/packages/cli.py
index 51f9296..7299064 100644
--- a/src/kupferbootstrap/packages/cli.py
+++ b/src/kupferbootstrap/packages/cli.py
@@ -7,11 +7,23 @@ from glob import glob
 from typing import Iterable, Optional
 
 from kupferbootstrap.config.state import config
-from kupferbootstrap.constants import Arch, ARCHES, SRCINFO_FILE, SRCINFO_INITIALISED_FILE, SRCINFO_METADATA_FILE, SRCINFO_TARBALL_FILE, SRCINFO_TARBALL_URL
+from kupferbootstrap.constants import (
+    Arch,
+    ARCHES,
+    SRCINFO_FILE,
+    SRCINFO_INITIALISED_FILE,
+    SRCINFO_METADATA_FILE,
+    SRCINFO_TARBALL_FILE,
+    SRCINFO_TARBALL_URL,
+)
 from kupferbootstrap.exec.cmd import run_cmd, shell_quote, CompletedProcess
 from kupferbootstrap.exec.file import get_temp_dir, makedir, remove_file
 from kupferbootstrap.devices.device import get_profile_device
-from kupferbootstrap.distro.distro import get_kupfer_local, get_kupfer_url, get_kupfer_repo_names
+from kupferbootstrap.distro.distro import (
+    get_kupfer_local,
+    get_kupfer_url,
+    get_kupfer_repo_names,
+)
 from kupferbootstrap.distro.package import LocalPackage
 from kupferbootstrap.net.ssh import run_ssh_command, scp_put_files
 from kupferbootstrap.utils import download_file, git, sha256sum
@@ -19,9 +31,18 @@ from kupferbootstrap.version.cli import _check_kbs_version
 from kupferbootstrap.wrapper import check_programs_wrap, enforce_wrap
 
 from .build import build_packages_by_paths, init_prebuilts
-from .pkgbuild import discover_pkgbuilds, filter_pkgbuilds, get_pkgbuild_dirs, init_pkgbuilds
+from .pkgbuild import (
+    discover_pkgbuilds,
+    filter_pkgbuilds,
+    get_pkgbuild_dirs,
+    init_pkgbuilds,
+)
 
-SRCINFO_CACHE_FILES = [SRCINFO_FILE, SRCINFO_INITIALISED_FILE, SRCINFO_METADATA_FILE]
+SRCINFO_CACHE_FILES = [
+    SRCINFO_FILE,
+    SRCINFO_INITIALISED_FILE,
+    SRCINFO_METADATA_FILE,
+]
 
 
 def build(
@@ -36,7 +57,9 @@ def build(
     arch = arch or get_profile_device(hint_or_set_arch=True).arch
 
     if arch not in ARCHES:
-        raise Exception(f'Unknown architecture "{arch}". Choices: {", ".join(ARCHES)}')
+        raise Exception(
+            f'Unknown architecture "{arch}". Choices: {", ".join(ARCHES)}'
+        )
 
     _check_kbs_version(init_pkgbuilds=True)
     return build_packages_by_paths(
@@ -52,50 +75,66 @@ def build(
     )
 
 
-def init_pkgbuild_caches(clean_src_dirs: bool = True, remote_branch: Optional[str] = None):
-
+def init_pkgbuild_caches(
+    clean_src_dirs: bool = True, remote_branch: Optional[str] = None
+):
     def read_srcinitialised_checksum(src_initialised):
         with open(src_initialised) as fd:
             d = json.load(fd)
         if isinstance(d, dict):
-            return d.get('PKGBUILD', '!!!ERROR!!!')
+            return d.get("PKGBUILD", "!!!ERROR!!!")
         raise Exception("JSON content not a dictionary!")
 
     # get_kupfer_url() resolves repo branch variable in url
     url = get_kupfer_url(url=SRCINFO_TARBALL_URL, branch=remote_branch)
-    cachetar = os.path.join(config.get_path('packages'), SRCINFO_TARBALL_FILE)
+    cachetar = os.path.join(config.get_path("packages"), SRCINFO_TARBALL_FILE)
     makedir(os.path.dirname(cachetar))
-    logging.info(f"Updating PKGBUILD caches from {url}" + (", pruning outdated src/ directories" if clean_src_dirs else ""))
+    logging.info(
+        f"Updating PKGBUILD caches from {url}"
+        + (", pruning outdated src/ directories" if clean_src_dirs else "")
+    )
     updated = download_file(cachetar, url)
-    logging.info("Cache tarball was " + ('downloaded successfully' if updated else 'already up to date'))
+    logging.info(
+        "Cache tarball was "
+        + ("downloaded successfully" if updated else "already up to date")
+    )
     tmpdir = get_temp_dir()
     logging.debug(f"Extracting {cachetar} to {tmpdir}")
-    res = run_cmd(['tar', 'xf', cachetar], cwd=tmpdir)
+    res = run_cmd(["tar", "xf", cachetar], cwd=tmpdir)
     assert isinstance(res, CompletedProcess)
     if res.returncode:
-        raise Exception(f"failed to extract srcinfo cache archive '{cachetar}'")
+        raise Exception(
+            f"failed to extract srcinfo cache archive '{cachetar}'"
+        )
     pkgbuild_dirs = get_pkgbuild_dirs()
     for pkg in pkgbuild_dirs:
         logging.info(f"{pkg}: analyzing cache")
-        pkgdir = os.path.join(config.get_path('pkgbuilds'), pkg)
-        srcdir = os.path.join(pkgdir, 'src')
+        pkgdir = os.path.join(config.get_path("pkgbuilds"), pkg)
+        srcdir = os.path.join(pkgdir, "src")
         src_initialised = os.path.join(pkgdir, SRCINFO_INITIALISED_FILE)
         cachedir = os.path.join(tmpdir, pkg)
-        pkgbuild_checksum = sha256sum(os.path.join(pkgdir, 'PKGBUILD'))
+        pkgbuild_checksum = sha256sum(os.path.join(pkgdir, "PKGBUILD"))
         copy_files: set[str] = set(SRCINFO_CACHE_FILES)
         if os.path.exists(src_initialised):
             try:
-                if read_srcinitialised_checksum(src_initialised) == pkgbuild_checksum:
+                if (
+                    read_srcinitialised_checksum(src_initialised)
+                    == pkgbuild_checksum
+                ):
                     copy_files.remove(SRCINFO_INITIALISED_FILE)
                     for f in copy_files.copy():
                         fpath = os.path.join(pkgdir, f)
                         if os.path.exists(fpath):
                             copy_files.remove(f)
                     if not copy_files:
-                        logging.info(f"{pkg}: SRCINFO cache already up to date")
+                        logging.info(
+                            f"{pkg}: SRCINFO cache already up to date"
+                        )
                        continue
             except Exception as ex:
-                logging.warning(f"{pkg}: Something went wrong parsing {SRCINFO_INITIALISED_FILE}, treating as outdated!:\n{ex}")
+                logging.warning(
+                    f"{pkg}: Something went wrong parsing {SRCINFO_INITIALISED_FILE}, treating as outdated!:\n{ex}"
+                )
            if clean_src_dirs and os.path.exists(srcdir):
                logging.info(f"{pkg}: outdated src/ detected, removing")
                remove_file(srcdir, recursive=True)
@@ -105,48 +144,67 @@ def init_pkgbuild_caches(clean_src_dirs: bool = True, remote_branch: Optional[st
             continue
         cache_initialised = os.path.join(cachedir, SRCINFO_INITIALISED_FILE)
         try:
-            if read_srcinitialised_checksum(cache_initialised) != pkgbuild_checksum:
-                logging.info(f"{pkg}: PKGBUILD checksum differs from remote repo cache, skipping")
+            if (
+                read_srcinitialised_checksum(cache_initialised)
+                != pkgbuild_checksum
+            ):
+                logging.info(
+                    f"{pkg}: PKGBUILD checksum differs from remote repo cache, skipping"
+                )
                 continue
         except Exception as ex:
-            logging.warning(f"{pkg}: Failed to parse the remote repo's cached {SRCINFO_INITIALISED_FILE}, skipping!:\n{ex}")
+            logging.warning(
+                f"{pkg}: Failed to parse the remote repo's cached {SRCINFO_INITIALISED_FILE}, skipping!:\n{ex}"
+            )
            continue
        if not copy_files:
            continue
        logging.info(f"{pkg}: Copying srcinfo cache from remote repo")
-        logging.debug(f'{pkg}: copying {copy_files}')
-        copy_files_list = [shell_quote(os.path.join(cachedir, f)) for f in copy_files]
+        logging.debug(f"{pkg}: copying {copy_files}")
+        copy_files_list = [
+            shell_quote(os.path.join(cachedir, f)) for f in copy_files
+        ]
        res = run_cmd(f"cp {' '.join(copy_files_list)} {shell_quote(pkgdir)}/")
        assert isinstance(res, CompletedProcess)
        if res.returncode:
-            raise Exception(f"{pkg}: failed to copy cache contents from {cachedir}")
+            raise Exception(
+                f"{pkg}: failed to copy cache contents from {cachedir}"
+            )
 
 
-non_interactive_flag = click.option('--non-interactive', is_flag=True)
+non_interactive_flag = click.option("--non-interactive", is_flag=True)
 init_caches_flag = click.option(
-    '--init-caches/--no-init-caches',
+    "--init-caches/--no-init-caches",
     is_flag=True,
     default=True,
     show_default=True,
     help="Fill PKGBUILDs caches from HTTPS repo where checksums match",
 )
 remove_outdated_src_flag = click.option(
-    '--clean-src-dirs/--no-clean-src-dirs',
+    "--clean-src-dirs/--no-clean-src-dirs",
     is_flag=True,
     default=True,
     show_default=True,
     help="Remove outdated src/ directories to avoid problems",
 )
-switch_branch_flag = click.option('--switch-branch', is_flag=True, help="Force the branch to be corrected even in non-interactive mode")
-discard_changes_flag = click.option('--discard-changes', is_flag=True, help="When switching branches, discard any locally changed conflicting files")
+switch_branch_flag = click.option( + "--switch-branch", + is_flag=True, + help="Force the branch to be corrected even in non-interactive mode", +) +discard_changes_flag = click.option( + "--discard-changes", + is_flag=True, + help="When switching branches, discard any locally changed conflicting files", +) -@click.group(name='packages') +@click.group(name="packages") def cmd_packages(): """Build and manage packages and PKGBUILDs""" -@cmd_packages.command(name='update') +@cmd_packages.command(name="update") @non_interactive_flag @init_caches_flag @switch_branch_flag @@ -161,7 +219,13 @@ def cmd_update( ): """Update PKGBUILDs git repo""" enforce_wrap() - init_pkgbuilds(interactive=not non_interactive, lazy=False, update=True, switch_branch=switch_branch, discard_changes=discard_changes) + init_pkgbuilds( + interactive=not non_interactive, + lazy=False, + update=True, + switch_branch=switch_branch, + discard_changes=discard_changes, + ) _check_kbs_version(init_pkgbuilds=False) if init_caches: init_pkgbuild_caches(clean_src_dirs=clean_src_dirs) @@ -169,13 +233,15 @@ def cmd_update( discover_pkgbuilds(lazy=False) -@cmd_packages.command(name='init') +@cmd_packages.command(name="init") @non_interactive_flag @init_caches_flag @switch_branch_flag @discard_changes_flag @remove_outdated_src_flag -@click.option('-u', '--update', is_flag=True, help='Use git pull to update the PKGBUILDs') +@click.option( + "-u", "--update", is_flag=True, help="Use git pull to update the PKGBUILDs" +) def cmd_init( non_interactive: bool = False, init_caches: bool = True, @@ -185,7 +251,13 @@ def cmd_init( update: bool = False, ): "Ensure PKGBUILDs git repo is checked out locally" - init_pkgbuilds(interactive=not non_interactive, lazy=False, update=update, switch_branch=switch_branch, discard_changes=discard_changes) + init_pkgbuilds( + interactive=not non_interactive, + lazy=False, + update=update, + switch_branch=switch_branch, + discard_changes=discard_changes, + ) _check_kbs_version(init_pkgbuilds=False) if init_caches: init_pkgbuild_caches(clean_src_dirs=clean_src_dirs) @@ -193,13 +265,40 @@ def cmd_init( init_prebuilts(arch) -@cmd_packages.command(name='build') -@click.option('--force', is_flag=True, default=False, help='Rebuild even if package is already built') -@click.option('--arch', default=None, required=False, type=click.Choice(ARCHES), help="The CPU architecture to build for") -@click.option('--rebuild-dependants', is_flag=True, default=False, help='Rebuild packages that depend on packages that will be [re]built') -@click.option('--no-download', is_flag=True, default=False, help="Don't try downloading packages from online repos before building") -@click.argument('paths', nargs=-1) -def cmd_build(paths: list[str], force=False, arch: Optional[Arch] = None, rebuild_dependants: bool = False, no_download: bool = False): +@cmd_packages.command(name="build") +@click.option( + "--force", + is_flag=True, + default=False, + help="Rebuild even if package is already built", +) +@click.option( + "--arch", + default=None, + required=False, + type=click.Choice(ARCHES), + help="The CPU architecture to build for", +) +@click.option( + "--rebuild-dependants", + is_flag=True, + default=False, + help="Rebuild packages that depend on packages that will be [re]built", +) +@click.option( + "--no-download", + is_flag=True, + default=False, + help="Don't try downloading packages from online repos before building", +) +@click.argument("paths", nargs=-1) +def cmd_build( + paths: list[str], + force=False, + arch: Optional[Arch] = 
None,
+    rebuild_dependants: bool = False,
+    no_download: bool = False,
+):
     """
     Build packages (and dependencies) by paths as required.

@@ -210,48 +309,90 @@ def cmd_build(paths: list[str], force=False, arch: Optional[Arch] = None, rebuil
     Packages that aren't built already will be downloaded from HTTPS repos unless --no-download is passed,
     if an exact version match exists on the server.
     """
-    build(paths, force, arch=arch, rebuild_dependants=rebuild_dependants, try_download=not no_download)
+    build(
+        paths,
+        force,
+        arch=arch,
+        rebuild_dependants=rebuild_dependants,
+        try_download=not no_download,
+    )


-@cmd_packages.command(name='sideload')
-@click.argument('paths', nargs=-1)
-@click.option('--arch', default=None, required=False, type=click.Choice(ARCHES), help="The CPU architecture to build for")
-@click.option('-B', '--no-build', is_flag=True, default=False, help="Don't try to build packages, just copy and install")
-def cmd_sideload(paths: Iterable[str], arch: Optional[Arch] = None, no_build: bool = False):
+@cmd_packages.command(name="sideload")
+@click.argument("paths", nargs=-1)
+@click.option(
+    "--arch",
+    default=None,
+    required=False,
+    type=click.Choice(ARCHES),
+    help="The CPU architecture to build for",
+)
+@click.option(
+    "-B",
+    "--no-build",
+    is_flag=True,
+    default=False,
+    help="Don't try to build packages, just copy and install",
+)
+def cmd_sideload(
+    paths: Iterable[str], arch: Optional[Arch] = None, no_build: bool = False
+):
     """Build packages, copy to the device via SSH and install them"""
     if not paths:
         raise Exception("No packages specified")
     arch = arch or get_profile_device(hint_or_set_arch=True).arch
     if not no_build:
         build(paths, False, arch=arch, try_download=True)
-    repo: dict[str, LocalPackage] = get_kupfer_local(arch=arch, scan=True, in_chroot=False).get_packages()
-    files = [pkg.resolved_url.split('file://')[1] for pkg in repo.values() if pkg.resolved_url and pkg.name in paths]
+    repo: dict[str, LocalPackage] = get_kupfer_local(
+        arch=arch, scan=True, in_chroot=False
+    ).get_packages()
+    files = [
+        pkg.resolved_url.split("file://")[1]
+        for pkg in repo.values()
+        if pkg.resolved_url and pkg.name in paths
+    ]
     logging.debug(f"Sideload: Found package files: {files}")
     if not files:
         logging.fatal("No packages matched")
         return
-    scp_put_files(files, '/tmp').check_returncode()
+    scp_put_files(files, "/tmp").check_returncode()
     run_ssh_command(
         [
-            'sudo',
-            'pacman',
-            '-U',
-            *[os.path.join('/tmp', os.path.basename(file)) for file in files],
-            '--noconfirm',
+            "sudo",
+            "pacman",
+            "-U",
+            *[os.path.join("/tmp", os.path.basename(file)) for file in files],
+            "--noconfirm",
             "'--overwrite=\\*'",
         ],
         alloc_tty=True,
     ).check_returncode()


-CLEAN_LOCATIONS = ['src', 'pkg', *SRCINFO_CACHE_FILES]
+CLEAN_LOCATIONS = ["src", "pkg", *SRCINFO_CACHE_FILES]


-@cmd_packages.command(name='clean')
-@click.option('-f', '--force', is_flag=True, default=False, help="Don't prompt for confirmation")
-@click.option('-n', '--noop', is_flag=True, default=False, help="Print what would be removed but dont execute")
-@click.argument('what', type=click.Choice(['all', 'git', *CLEAN_LOCATIONS]), nargs=-1)
-def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = False):
+@cmd_packages.command(name="clean")
+@click.option(
+    "-f",
+    "--force",
+    is_flag=True,
+    default=False,
+    help="Don't prompt for confirmation",
+)
+@click.option(
+    "-n",
+    "--noop",
+    is_flag=True,
+    default=False,
+    help="Print what would be removed but don't execute",
+)
+@click.argument(
+    
"what", type=click.Choice(["all", "git", *CLEAN_LOCATIONS]), nargs=-1 +) +def cmd_clean( + what: Iterable[str] = ["all"], force: bool = False, noop: bool = False +): """ Clean temporary files from PKGBUILDs @@ -261,40 +402,41 @@ def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = F Be careful with it, as it means re-downloading sources for your packages. """ if noop: - logging.debug('Running in noop mode!') + logging.debug("Running in noop mode!") if force: - logging.debug('Running in FORCE mode!') - what = what or ['all'] - logging.debug(f'Clearing {what} from PKGBUILDs') - pkgbuilds = config.get_path('pkgbuilds') - if 'git' in what: - check_programs_wrap(['git']) + logging.debug("Running in FORCE mode!") + what = what or ["all"] + logging.debug(f"Clearing {what} from PKGBUILDs") + pkgbuilds = config.get_path("pkgbuilds") + if "git" in what: + check_programs_wrap(["git"]) warning = "Really reset PKGBUILDs to git state completely?\nThis will erase any untracked changes to your PKGBUILDs directory." if not (noop or force or click.confirm(warning)): return result = git( [ - 'clean', - '-dffX' + ('n' if noop else ''), - ] + get_kupfer_repo_names(local=True), + "clean", + "-dffX" + ("n" if noop else ""), + ] + + get_kupfer_repo_names(local=True), dir=pkgbuilds, ) if result.returncode != 0: - logging.fatal('Failed to git clean') + logging.fatal("Failed to git clean") exit(1) else: - if 'all' in what: + if "all" in what: what = CLEAN_LOCATIONS what = set(what) dirs = [] for loc in CLEAN_LOCATIONS: if loc in what: - logging.info(f'gathering {loc} instances') - dirs += glob(os.path.join(pkgbuilds, '*', '*', loc)) + logging.info(f"gathering {loc} instances") + dirs += glob(os.path.join(pkgbuilds, "*", "*", loc)) - dir_lines = '\n'.join(dirs) - verb = 'Would remove' if noop else 'Removing' - logging.info(verb + ':\n' + dir_lines) + dir_lines = "\n".join(dirs) + verb = "Would remove" if noop else "Removing" + logging.info(verb + ":\n" + dir_lines) if not (noop or force): if not click.confirm("Really remove all of these?", default=True): @@ -305,30 +447,36 @@ def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = F remove_file(dir, recursive=True) -@cmd_packages.command(name='list') +@cmd_packages.command(name="list") def cmd_list(): "List information about available source packages (PKGBUILDs)" - pkgdir = os.path.join(config.get_path('pkgbuilds'), get_kupfer_repo_names(local=False)[0]) + pkgdir = os.path.join( + config.get_path("pkgbuilds"), get_kupfer_repo_names(local=False)[0] + ) if not os.path.exists(pkgdir): - raise Exception(f"PKGBUILDs seem not to be initialised yet: {pkgdir} doesn't exist!\n" - f"Try running `kupferbootstrap packages init` first!") - check_programs_wrap(['git', 'makepkg', 'pacman']) + raise Exception( + f"PKGBUILDs seem not to be initialised yet: {pkgdir} doesn't exist!\n" + f"Try running `kupferbootstrap packages init` first!" + ) + check_programs_wrap(["git", "makepkg", "pacman"]) _check_kbs_version(init_pkgbuilds=False) packages = discover_pkgbuilds() - logging.info(f'Done! {len(packages)} Pkgbuilds:') + logging.info(f"Done! 
{len(packages)} Pkgbuilds:")
     for name in sorted(packages.keys()):
         p = packages[name]
-        print(f'name: {p.name}; ver: {p.version}; mode: {p.mode}; crossdirect: {p.crossdirect} provides: {p.provides}; replaces: {p.replaces};'
-              f'local_depends: {p.local_depends}; depends: {p.depends}')
+        print(
+            f"name: {p.name}; ver: {p.version}; mode: {p.mode}; crossdirect: {p.crossdirect}; provides: {p.provides}; replaces: {p.replaces}; "
+            f"local_depends: {p.local_depends}; depends: {p.depends}"
+        )


-@cmd_packages.command(name='check')
+@cmd_packages.command(name="check")
 @click.option("--ci-mode", "--ci", is_flag=True, default=False)
-@click.argument('paths', nargs=-1)
+@click.argument("paths", nargs=-1)
 def cmd_check(paths: list[str], ci_mode: bool = False):
     """Check that specified PKGBUILDs are formatted correctly"""
     config.enforce_config_loaded()
-    check_programs_wrap(['makepkg', 'git'])
+    check_programs_wrap(["makepkg", "git"])
     _check_kbs_version(init_pkgbuilds=False, ci_mode=ci_mode)

     def check_quoteworthy(s: str) -> bool:
@@ -338,65 +486,67 @@ def cmd_check(paths: list[str], ci_mode: bool = False):
                 return True
         return False

-    paths = list(paths) or ['all']
+    paths = list(paths) or ["all"]
     packages = filter_pkgbuilds(paths, allow_empty_results=False)
     for package in packages:
         name = package.name

         is_git_package = False
-        if name.endswith('-git'):
+        if name.endswith("-git"):
             is_git_package = True

-        required_arches = ''
+        required_arches = ""
         provided_arches: list[str] = []

-        mode_key = '_mode'
-        nodeps_key = '_nodeps'
-        crossdirect_key = '_crossdirect'
-        pkgbase_key = 'pkgbase'
-        pkgname_key = 'pkgname'
-        arches_key = '_arches'
-        arch_key = 'arch'
-        commit_key = '_commit'
-        source_key = 'source'
-        sha256sums_key = 'sha256sums'
+        mode_key = "_mode"
+        nodeps_key = "_nodeps"
+        crossdirect_key = "_crossdirect"
+        pkgbase_key = "pkgbase"
+        pkgname_key = "pkgname"
+        arches_key = "_arches"
+        arch_key = "arch"
+        commit_key = "_commit"
+        source_key = "source"
+        sha256sums_key = "sha256sums"
         required = {
             mode_key: True,
             nodeps_key: False,
             crossdirect_key: False,
             pkgbase_key: False,
             pkgname_key: True,
-            'pkgdesc': False,
-            'pkgver': True,
-            'pkgrel': True,
+            "pkgdesc": False,
+            "pkgver": True,
+            "pkgrel": True,
             arches_key: True,
             arch_key: True,
-            'license': True,
-            'url': False,
-            'provides': is_git_package,
-            'conflicts': False,
-            'replaces': False,
-            'depends': False,
-            'optdepends': False,
-            'makedepends': False,
-            'backup': False,
-            'install': False,
-            'options': False,
+            "license": True,
+            "url": False,
+            "provides": is_git_package,
+            "conflicts": False,
+            "replaces": False,
+            "depends": False,
+            "optdepends": False,
+            "makedepends": False,
+            "backup": False,
+            "install": False,
+            "options": False,
             commit_key: is_git_package,
             source_key: False,
             sha256sums_key: False,
-            'noextract': False,
+            "noextract": False,
         }

-        pkgbuild_path = os.path.join(config.get_path('pkgbuilds'), package.path, 'PKGBUILD')
-        with open(pkgbuild_path, 'r') as file:
+        pkgbuild_path = os.path.join(
+            config.get_path("pkgbuilds"), package.path, "PKGBUILD"
+        )
+        with open(pkgbuild_path, "r") as file:
             content = file.read()
-        if '\t' in content:
-            logging.fatal(f'\\t is not allowed in {pkgbuild_path}')
+        if "\t" in content:
+            logging.fatal(f"\\t is not allowed in {pkgbuild_path}")
             exit(1)
-        lines = content.split('\n')
+        lines = content.split("\n")
         if len(lines) == 0:
-            logging.fatal(f'Empty {pkgbuild_path}')
+            logging.fatal(f"Empty {pkgbuild_path}")
             exit(1)
         line_index = 0
         key_index = 0
@@ -405,11 +555,16 @@ def cmd_check(paths: list[str], 
ci_mode: bool = False):
         while True:
             line = lines[line_index]

-            if line.startswith('#'):
+            if line.startswith("#"):
                 line_index += 1
                 continue

-            if line.startswith('_') and line.split('=', 1)[0] not in [mode_key, nodeps_key, arches_key, commit_key]:
+            if line.startswith("_") and line.split("=", 1)[0] not in [
+                mode_key,
+                nodeps_key,
+                arches_key,
+                commit_key,
+            ]:
                 line_index += 1
                 continue

@@ -433,22 +588,22 @@ def cmd_check(paths: list[str], ci_mode: bool = False):
             elif key in required and not required[key]:
                 next_key = True

-            if line == ')':
+            if line == ")":
                 hold_key = False
                 next_key = True

             if key == arches_key:
-                required_arches = line.split('=')[1]
+                required_arches = line.split("=")[1]

-            if line.endswith('=('):
+            if line.endswith("=("):
                 hold_key = True

-            if line.startswith(' ') or line == ')':
+            if line.startswith(" ") or line == ")":
                 next_line = True

-            if line.startswith(' ') and not line.startswith('    '):
+            if line.startswith(" ") and not line.startswith("    "):
                 formatted = False
-                reason = 'Multiline variables should be indented with 4 spaces'
+                reason = "Multiline variables should be indented with 4 spaces"

             if '"' in line and not check_quoteworthy(line):
                 formatted = False
@@ -456,36 +611,51 @@ def cmd_check(paths: list[str], ci_mode: bool = False):

             if "'" in line and '"' not in line:
                 formatted = False
-                reason = 'Found literal \' although either a literal " or no qoutes should be used'
+                reason = "Found literal ' although either a literal \" or no quotes should be used"

-            if ('=(' in line and ' ' in line and '"' not in line and not line.endswith('=(')) or (hold_key and line.endswith(')')):
+            if (
+                "=(" in line
+                and " " in line
+                and '"' not in line
+                and not line.endswith("=(")
+            ) or (hold_key and line.endswith(")")):
                 formatted = False
-                reason = 'Multiple elements in a list need to be in separate lines'
+                reason = (
+                    "Multiple elements in a list need to be on separate lines"
+                )

             if formatted and not next_key and not next_line:
                 if key_index == len(required):
-                    if lines[line_index] == '':
+                    if lines[line_index] == "":
                         break
                     else:
                         formatted = False
-                        reason = 'Expected final emtpy line after all variables'
+                        reason = (
+                            "Expected final empty line after all variables"
+                        )
                 else:
                     formatted = False
                     reason = f'Expected to find "{key}"'

             if not formatted:
-                logging.fatal(f'Formatting error in {pkgbuild_path}: Line {line_index+1}: "{line}"')
+                logging.fatal(
+                    f'Formatting error in {pkgbuild_path}: Line {line_index + 1}: "{line}"'
+                )
                 if reason != "":
                     logging.fatal(reason)
                 exit(1)

             if key == arch_key:
-                if line.endswith(')'):
-                    if line.startswith(f'{arch_key}=('):
-                        check_arches_hint(pkgbuild_path, required_arches, [line[6:-1]])
+                if line.endswith(")"):
+                    if line.startswith(f"{arch_key}=("):
+                        check_arches_hint(
+                            pkgbuild_path, required_arches, [line[6:-1]]
+                        )
                     else:
-                        check_arches_hint(pkgbuild_path, required_arches, provided_arches)
-                elif line.startswith(' '):
+                        check_arches_hint(
+                            pkgbuild_path, required_arches, provided_arches
+                        )
+                elif line.startswith(" "):
                     provided_arches.append(line[4:])

             if next_key and not hold_key:
@@ -493,11 +663,13 @@ def cmd_check(paths: list[str], ci_mode: bool = False):
             if next_line:
                 line_index += 1

-    logging.info(f'{package.path} nicely formatted!')
+    logging.info(f"{package.path} nicely formatted!")


 def check_arches_hint(path: str, required: str, provided: list[str]):
-    if required == 'all':
+    if required == "all":
         for arch in ARCHES:
             if arch not in provided:
-                logging.warning(f'Missing {arch} in arches list in {path}, because _arches hint is `all`')
+                
logging.warning( + f"Missing {arch} in arches list in {path}, because _arches hint is `all`" + ) diff --git a/src/kupferbootstrap/packages/pkgbuild.py b/src/kupferbootstrap/packages/pkgbuild.py index e634d2e..8be9e8b 100644 --- a/src/kupferbootstrap/packages/pkgbuild.py +++ b/src/kupferbootstrap/packages/pkgbuild.py @@ -30,60 +30,91 @@ def clone_pkgbuilds( switch_branch: bool = False, discard_changes: bool = False, ): - check_programs_wrap(['git']) - git_dir = os.path.join(pkgbuilds_dir, '.git') + check_programs_wrap(["git"]) + git_dir = os.path.join(pkgbuilds_dir, ".git") if not os.path.exists(git_dir): - logging.info(f'Cloning branch {branch} from {repo_url}') - result = git(['clone', '-b', branch, repo_url, pkgbuilds_dir]) + logging.info(f"Cloning branch {branch} from {repo_url}") + result = git(["clone", "-b", branch, repo_url, pkgbuilds_dir]) if result.returncode != 0: - raise Exception('Error cloning pkgbuilds') + raise Exception("Error cloning pkgbuilds") else: current_branch = git_get_branch(pkgbuilds_dir) if current_branch != branch: - logging.warning(f'pkgbuilds repository is on the wrong branch: {current_branch}, requested: {branch}') - if switch_branch or (interactive and click.confirm('Would you like to switch branches?', default=False)): - result = git(['remote', 'update'], dir=pkgbuilds_dir) + logging.warning( + f"pkgbuilds repository is on the wrong branch: {current_branch}, requested: {branch}" + ) + if switch_branch or ( + interactive + and click.confirm( + "Would you like to switch branches?", default=False + ) + ): + result = git(["remote", "update"], dir=pkgbuilds_dir) if result.returncode != 0: - raise Exception('failed updating PKGBUILDs branches') - result = git(['switch', *(['-f'] if discard_changes else []), branch], dir=pkgbuilds_dir) + raise Exception("failed updating PKGBUILDs branches") + result = git( + ["switch", *(["-f"] if discard_changes else []), branch], + dir=pkgbuilds_dir, + ) if result.returncode != 0: - raise Exception('failed switching PKGBUILDs branches') - logging.warning('Hint: you can use `kupferbootstrap packages update` to switch branches') + raise Exception("failed switching PKGBUILDs branches") + logging.warning( + "Hint: you can use `kupferbootstrap packages update` to switch branches" + ) if update: if interactive: - if not click.confirm('Would you like to try updating the PKGBUILDs repo?', default=True): + if not click.confirm( + "Would you like to try updating the PKGBUILDs repo?", + default=True, + ): return - result = git(['fetch'], dir=pkgbuilds_dir) + result = git(["fetch"], dir=pkgbuilds_dir) if result.returncode != 0: raise Exception("Failed to fetch updates with git") - pull_cmd = ['pull', '--ff-only'] + pull_cmd = ["pull", "--ff-only"] result = git(pull_cmd, dir=pkgbuilds_dir) if result.returncode != 0: if discard_changes: - logging.info("git pull failed, detecting conflicting changes") + logging.info( + "git pull failed, detecting conflicting changes" + ) # '@{u}' is a git placeholder for the latest upstream commit - result = git(['diff', '--name-only', '--diff-filter=UD', '@{u}'], capture_output=True, dir=pkgbuilds_dir) + result = git( + ["diff", "--name-only", "--diff-filter=UD", "@{u}"], + capture_output=True, + dir=pkgbuilds_dir, + ) result.check_returncode() if result.stdout: logging.info("Discarding conflicting changes") - for f in result.stdout.decode().split('\n'): + for f in result.stdout.decode().split("\n"): path = os.path.join(pkgbuilds_dir, f) if not os.path.exists(path): continue - result = git(['checkout', '--', 
f], dir=pkgbuilds_dir, capture_output=True) + result = git( + ["checkout", "--", f], + dir=pkgbuilds_dir, + capture_output=True, + ) if result.returncode != 0: - logging.debug(f'git checkout of file "{f}" failed; removing.') + logging.debug( + f'git checkout of file "{f}" failed; removing.' + ) remove_file(path) logging.info("Retrying git pull") result = git(pull_cmd, dir=pkgbuilds_dir) if result.returncode != 0: logging.info("Last resort: git reset --hard") - result = git(['reset', '--hard', '@{u}'], capture_output=True, dir=pkgbuilds_dir) + result = git( + ["reset", "--hard", "@{u}"], + capture_output=True, + dir=pkgbuilds_dir, + ) if result.returncode == 0: return - raise Exception('`git pull` failed to update pkgbuilds') + raise Exception("`git pull` failed to update pkgbuilds") _pkgbuilds_initialised: bool = False @@ -99,7 +130,7 @@ def init_pkgbuilds( global _pkgbuilds_initialised if lazy and _pkgbuilds_initialised: return - pkgbuilds_dir = config.get_path('pkgbuilds') + pkgbuilds_dir = config.get_path("pkgbuilds") repo_url = config.file.pkgbuilds.git_repo branch = config.file.pkgbuilds.git_branch clone_pkgbuilds( @@ -119,20 +150,22 @@ VersionSpecs: TypeAlias = dict[str, Optional[list[VersionSpec]]] def parse_version_spec(spec: str) -> tuple[str, VersionSpec]: - for op in ['<', '>', '=']: + for op in ["<", ">", "="]: if op in spec: name, ver = spec.split(op, 1) assert name and ver ver = op + ver - if name[-1] == '=': - assert op != '=' + if name[-1] == "=": + assert op != "=" name = name[:-1] - ver = '=' + ver + ver = "=" + ver return name, ver return spec.strip(), None -def get_version_specs(spec: str, existing_specs: Optional[VersionSpecs] = None) -> VersionSpecs: +def get_version_specs( + spec: str, existing_specs: Optional[VersionSpecs] = None +) -> VersionSpecs: specs = existing_specs or {} name, ver = parse_version_spec(spec) _specs = specs.get(name, None) @@ -181,38 +214,40 @@ class Pkgbuild(PackageInfo): `relative_path` will be stored in `self.path`. 
""" self.name = os.path.basename(relative_path) - self.version = '' + self.version = "" self.arches = list(arches) self.depends = dict(depends) self.makedepends = dict(makedepends) self.provides = dict(provides) self.replaces = list(replaces) self.local_depends = [] - self.repo = repo or '' - self.mode = '' + self.repo = repo or "" + self.mode = "" self.nodeps = False self.crossdirect = True self.path = relative_path - self.pkgver = '' - self.pkgrel = '' - self.description = '' + self.pkgver = "" + self.pkgrel = "" + self.description = "" self.sources_refreshed = sources_refreshed self.srcinfo_cache = srcinfo_cache def __repr__(self): - return ','.join([ - 'Pkgbuild(' + self.name, - repr(self.path), - str(self.version) + ("🔄" if self.sources_refreshed else ""), - repr(self.mode) + ')', - ]) + return ",".join( + [ + "Pkgbuild(" + self.name, + repr(self.path), + str(self.version) + ("🔄" if self.sources_refreshed else ""), + repr(self.mode) + ")", + ] + ) def names(self) -> list[str]: return list({self.name, *self.provides, *self.replaces}) def update_version(self): """updates `self.version` from `self.pkgver` and `self.pkgrel`""" - self.version = f'{self.pkgver}-{self.pkgrel}' + self.version = f"{self.pkgver}-{self.pkgrel}" def update(self, pkg: Pkgbuild): self.version = pkg.version @@ -230,7 +265,9 @@ class Pkgbuild(PackageInfo): self.pkgver = pkg.pkgver self.pkgrel = pkg.pkgrel self.description = pkg.description - self.sources_refreshed = self.sources_refreshed or pkg.sources_refreshed + self.sources_refreshed = ( + self.sources_refreshed or pkg.sources_refreshed + ) self.update_version() def refresh_sources(self): @@ -239,9 +276,9 @@ class Pkgbuild(PackageInfo): def get_filename(self, arch: Arch): if not self.version: self.update_version() - if self.arches[0] == 'any': - arch = 'any' - return f'{self.name}-{self.version}-{arch}.pkg.tar.zst' + if self.arches[0] == "any": + arch = "any" + return f"{self.name}-{self.version}-{arch}.pkg.tar.zst" def is_built(self, arch: Arch, tolerate_archless: bool = True) -> bool: raise NotImplementedError() @@ -251,14 +288,18 @@ class Pkgbase(Pkgbuild): subpackages: list[SubPkgbuild] _built_for: set[Arch] - def __init__(self, relative_path: str, subpackages: list[SubPkgbuild] = [], **args): + def __init__( + self, relative_path: str, subpackages: list[SubPkgbuild] = [], **args + ): self._built_for = set() self.subpackages = list(subpackages) super().__init__(relative_path, **args) def update(self, pkg: Pkgbuild): if not isinstance(pkg, Pkgbase): - raise Exception(f"Tried to update pkgbase {self.name} with non-base pkg {pkg}") + raise Exception( + f"Tried to update pkgbase {self.name} with non-base pkg {pkg}" + ) Pkgbuild.update(self, pkg) self._built_for.update(pkg._built_for) sub_dict = {p.name: p for p in self.subpackages} @@ -274,10 +315,10 @@ class Pkgbase(Pkgbuild): self.subpackages.append(updated) def refresh_sources(self, lazy: bool = True): - ''' + """ Reloads the pkgbuild from disk. Does **NOT** actually perform the makepkg action to refresh the pkgver() first! 
- ''' + """ if lazy and self.sources_refreshed: return parsed = parse_pkgbuild(self.path, sources_refreshed=True) @@ -297,7 +338,7 @@ class Pkgbase(Pkgbuild): def is_built(self, arch: Arch, tolerate_archless: bool = True) -> bool: arches = {arch} if tolerate_archless: - arches.add('any') + arches.add("any") return bool(self._built_for.intersection(arches)) @@ -305,7 +346,6 @@ class SubPkgbuild(Pkgbuild): pkgbase: Pkgbase def __init__(self, name: str, pkgbase: Pkgbase): - self.name = name self.pkgbase = pkgbase self.srcinfo_cache = pkgbase.srcinfo_cache @@ -339,22 +379,28 @@ def parse_pkgbuild( global config if _config: config = _config - setup_logging(verbose=config.runtime.verbose, force_colors=config.runtime.colors, log_setup=False) # different subprocess needs log setup. + setup_logging( + verbose=config.runtime.verbose, + force_colors=config.runtime.colors, + log_setup=False, + ) # different subprocess needs log setup. logging.info(f"Discovering PKGBUILD for {relative_pkg_dir}") if force_refresh_srcinfo: - logging.info('force-refreshing SRCINFOs') + logging.info("force-refreshing SRCINFOs") # parse SRCINFO cache metadata and get correct SRCINFO lines - srcinfo_cache, lines = SrcinfoMetaFile.handle_directory(relative_pkg_dir, force_refresh=force_refresh_srcinfo, write=True) + srcinfo_cache, lines = SrcinfoMetaFile.handle_directory( + relative_pkg_dir, force_refresh=force_refresh_srcinfo, write=True + ) assert lines and srcinfo_cache - assert 'build_mode' in srcinfo_cache + assert "build_mode" in srcinfo_cache mode = srcinfo_cache.build_mode - assert 'build_nodeps' in srcinfo_cache + assert "build_nodeps" in srcinfo_cache nodeps = srcinfo_cache.build_nodeps - if mode not in ['host', 'cross']: - err = 'an invalid' if mode is not None else 'no' + if mode not in ["host", "cross"]: + err = "an invalid" if mode is not None else "no" err_end = f": {repr(mode)}" if mode is not None else "." 
- msg = f'{relative_pkg_dir}/PKGBUILD has {err} mode configured{err_end}' + msg = f"{relative_pkg_dir}/PKGBUILD has {err} mode configured{err_end}" if mode is None: logging.warning(msg) else: @@ -363,11 +409,15 @@ def parse_pkgbuild( # if _crossdirect is unset (None), it defaults to True crossdirect_enabled = srcinfo_cache.build_crossdirect in (None, True) - base_package = Pkgbase(relative_pkg_dir, sources_refreshed=sources_refreshed, srcinfo_cache=srcinfo_cache) + base_package = Pkgbase( + relative_pkg_dir, + sources_refreshed=sources_refreshed, + srcinfo_cache=srcinfo_cache, + ) base_package.crossdirect = crossdirect_enabled base_package.mode = mode base_package.nodeps = nodeps - base_package.repo = relative_pkg_dir.split('/')[0] + base_package.repo = relative_pkg_dir.split("/")[0] current: Pkgbuild = base_package multi_pkgs = False @@ -375,51 +425,62 @@ def parse_pkgbuild( line = line_raw.strip() if not line: continue - splits = line.split(' = ') - if line.startswith('pkgbase'): + splits = line.split(" = ") + if line.startswith("pkgbase"): base_package.name = splits[1] - elif line.startswith('pkgname'): + elif line.startswith("pkgname"): current = SubPkgbuild(splits[1], base_package) assert isinstance(base_package.subpackages, list) base_package.subpackages.append(current) if current.name != base_package.name: multi_pkgs = True - elif line.startswith('pkgver'): + elif line.startswith("pkgver"): current.pkgver = splits[1] - elif line.startswith('pkgrel'): + elif line.startswith("pkgrel"): current.pkgrel = splits[1] - elif line.startswith('pkgdesc'): + elif line.startswith("pkgdesc"): current.description = splits[1] - elif line.startswith('arch'): + elif line.startswith("arch"): current.arches.append(splits[1]) - elif line.startswith('provides'): + elif line.startswith("provides"): if not current.provides: current.provides = {} current.provides = get_version_specs(splits[1], current.provides) - elif line.startswith('replaces'): + elif line.startswith("replaces"): if not current.replaces: current.replaces = [] current.replaces.append(splits[1]) - elif splits[0] in ['depends', 'makedepends', 'checkdepends', 'optdepends']: - spec = splits[1].split(': ', 1)[0] + elif splits[0] in [ + "depends", + "makedepends", + "checkdepends", + "optdepends", + ]: + spec = splits[1].split(": ", 1)[0] if not current.depends: current.depends = (base_package.makedepends or {}).copy() current.depends = get_version_specs(spec, current.depends) - if splits[0] == 'makedepends': + if splits[0] == "makedepends": if not current.makedepends: current.makedepends = {} - current.makedepends = get_version_specs(spec, current.makedepends) + current.makedepends = get_version_specs( + spec, current.makedepends + ) results: list[Pkgbuild] = list(base_package.subpackages) if multi_pkgs: - logging.debug(f" Split package detected: {base_package.name}: {results}") + logging.debug( + f" Split package detected: {base_package.name}: {results}" + ) base_package.update_version() for pkg in results: assert isinstance(pkg, Pkgbuild) pkg.update_version() if not (pkg.version == base_package.version): - raise Exception(f'Subpackage malformed! Versions differ! base: {base_package}, subpackage: {pkg}') + raise Exception( + f"Subpackage malformed! Versions differ! 
base: {base_package}, subpackage: {pkg}" + ) if isinstance(pkg, SubPkgbuild): if pkg.depends is None: pkg.depends = base_package.depends @@ -444,9 +505,17 @@ def get_pkgbuild_by_path( _config: Optional[ConfigStateHolder] = None, ) -> list[Pkgbuild]: global _pkgbuilds_cache, _pkgbuilds_paths - if lazy and not force_refresh_srcinfo and relative_path in _pkgbuilds_paths: + if ( + lazy + and not force_refresh_srcinfo + and relative_path in _pkgbuilds_paths + ): return _pkgbuilds_paths[relative_path] - parsed = parse_pkgbuild(relative_path, force_refresh_srcinfo=force_refresh_srcinfo, _config=_config) + parsed = parse_pkgbuild( + relative_path, + force_refresh_srcinfo=force_refresh_srcinfo, + _config=_config, + ) _pkgbuilds_paths[relative_path] = parsed for pkg in parsed: _pkgbuilds_cache[pkg.name] = pkg @@ -463,44 +532,60 @@ def get_pkgbuild_by_name(name: str, lazy: bool = True): return get_pkgbuild_by_name(name=name, lazy=lazy) -def get_pkgbuild_dirs(quiet: bool = True, repositories: Optional[list[str]] = None) -> list[str]: +def get_pkgbuild_dirs( + quiet: bool = True, repositories: Optional[list[str]] = None +) -> list[str]: """Gets the relative paths to directories containing PKGBUILDs, optionally warns about dirs without a PKGBUILD""" - pkgbuilds_dir = config.get_path('pkgbuilds') + pkgbuilds_dir = config.get_path("pkgbuilds") paths = [] for repo in repositories or get_kupfer_repo_names(local=True): path = os.path.join(pkgbuilds_dir, repo) if not os.path.exists(path): if not quiet: - logging.warning(f'repo "{repo}" can\'t be listed: "{path}" doesn\'t exist; skipping') + logging.warning( + f'repo "{repo}" can\'t be listed: "{path}" doesn\'t exist; skipping' + ) continue for dir in os.listdir(path): p = os.path.join(repo, dir) - if not os.path.exists(os.path.join(pkgbuilds_dir, p, 'PKGBUILD')): + if not os.path.exists(os.path.join(pkgbuilds_dir, p, "PKGBUILD")): if not quiet: - logging.warning(f"{p} doesn't include a PKGBUILD file; skipping") + logging.warning( + f"{p} doesn't include a PKGBUILD file; skipping" + ) continue paths.append(p) return paths -def discover_pkgbuilds(parallel: bool = True, lazy: bool = True, repositories: Optional[list[str]] = None) -> dict[str, Pkgbuild]: +def discover_pkgbuilds( + parallel: bool = True, + lazy: bool = True, + repositories: Optional[list[str]] = None, +) -> dict[str, Pkgbuild]: global _pkgbuilds_cache, _pkgbuilds_scanned if lazy and _pkgbuilds_scanned: logging.debug("Reusing cached pkgbuilds repo") return _pkgbuilds_cache.copy() - check_programs_wrap(['makepkg']) + check_programs_wrap(["makepkg"]) packages: dict[str, Pkgbuild] = {} init_pkgbuilds(interactive=False) paths = get_pkgbuild_dirs(quiet=False, repositories=repositories) - logging.info(f"Discovering PKGBUILDs{f' in repositories: {repositories}' if repositories else ''}") + logging.info( + f"Discovering PKGBUILDs{f' in repositories: {repositories}' if repositories else ''}" + ) results = [] if parallel: paths_filtered = paths - backend = 'threading' - pass_config = config if backend != 'threading' else None - chunks = (Parallel(n_jobs=multiprocessing.cpu_count() * 4, - backend=backend)(delayed(get_pkgbuild_by_path)(path, lazy=lazy, _config=pass_config) for path in paths_filtered)) + backend = "threading" + pass_config = config if backend != "threading" else None + chunks = Parallel( + n_jobs=multiprocessing.cpu_count() * 4, backend=backend + )( + delayed(get_pkgbuild_by_path)(path, lazy=lazy, _config=pass_config) + for path in paths_filtered + ) else: chunks = (get_pkgbuild_by_path(path, 
lazy=lazy) for path in paths)

@@ -511,11 +596,13 @@ def discover_pkgbuilds(parallel: bool = True, lazy: bool = True, repositories: O
         _pkgbuilds_paths[pkglist[0].path] = pkglist
         results += pkglist

-    logging.info('Building package dictionary')
+    logging.info("Building package dictionary")
     for package in results:
         for name in [package.name] + package.replaces:
             if name in packages:
-                logging.warning(f'Overriding {packages[package.name]} with {package}')
+                logging.warning(
+                    f"Overriding {packages[package.name]} with {package}"
+                )
             packages[name] = package

     if repositories is None:
@@ -534,11 +621,15 @@ def discover_pkgbuilds(parallel: bool = True, lazy: bool = True, repositories: O
                 if found:
                     break
                 if dep in pkg.names():
-                    logging.debug(f'{package.path}: Found {pkg.name} that provides {dep}')
+                    logging.debug(
+                        f"{package.path}: Found {pkg.name} that provides {dep}"
+                    )
                     found = True
                     break
             if not found:
-                logging.debug(f'{package.path}: Removing {dep} from local dependencies')
+                logging.debug(
+                    f"{package.path}: Removing {dep} from local dependencies"
+                )
                 package.local_depends.remove(dep)

     return packages
@@ -553,22 +644,28 @@ def filter_pkgbuilds(
     use_names=True,
 ) -> Iterable[Pkgbuild]:
     if not (use_names or use_paths):
-        raise Exception('Error: filter_packages instructed to match neither by names nor paths; impossible!')
+        raise Exception(
+            "Error: filter_packages instructed to match neither by names nor paths; impossible!"
+        )
     paths = list(paths)
-    plural = 's' if len(paths) > 1 else ''
+    plural = "s" if len(paths) > 1 else ""
     fields = []
     if use_names:
-        fields.append('name' + plural)
+        fields.append("name" + plural)
     if use_paths:
-        fields.append('path' + plural)
-    fields_err = ' or '.join(fields)
+        fields.append("path" + plural)
+    fields_err = " or ".join(fields)
     if not allow_empty_results and not paths:
         raise Exception(f"Can't search for packages: no {fields_err} given")
     repo = repo or discover_pkgbuilds()
-    if 'all' in paths:
+    if "all" in paths:
         all_pkgs = list(repo.values())
         if arch:
-            all_pkgs = [pkg for pkg in all_pkgs if set([arch, 'any']).intersection(pkg.arches)]
+            all_pkgs = [
+                pkg
+                for pkg in all_pkgs
+                if set([arch, "any"]).intersection(pkg.arches)
+            ]
         return all_pkgs
     result = []
     to_find = list(paths)
@@ -581,8 +678,10 @@ def filter_pkgbuilds(
         matches = list(comparison.intersection(paths))
         if matches:
             assert pkg.arches
-            if arch and not set([arch, 'any']).intersection(pkg.arches):
-                logging.warn(f"Pkg {pkg.name} matches query {matches[0]} but isn't available for architecture {arch}: {pkg.arches}")
+            if arch and not set([arch, "any"]).intersection(pkg.arches):
+                logging.warn(
+                    f"Pkg {pkg.name} matches query {matches[0]} but isn't available for architecture {arch}: {pkg.arches}"
+                )
                 continue
             result += [pkg]
             for m in set(matches).intersection(to_find):
@@ -590,8 +689,14 @@ def filter_pkgbuilds(

     if not allow_empty_results:
         if not result:
-            raise Exception(f'No packages matched by {fields_err}: ' + ', '.join([f'"{p}"' for p in paths]))
+            raise Exception(
+                f"No packages matched by {fields_err}: "
+                + ", ".join([f'"{p}"' for p in paths])
+            )
         if to_find:
-            raise Exception(f"No packagages matched by {fields_err}: " + ', '.join([f'"{p}"' for p in to_find]))
+            raise Exception(
+                f"No packages matched by {fields_err}: "
+                + ", ".join([f'"{p}"' for p in to_find])
+            )

     return result
diff --git a/src/kupferbootstrap/packages/srcinfo_cache.py b/src/kupferbootstrap/packages/srcinfo_cache.py
index 4fe4792..45b9659 100644
--- a/src/kupferbootstrap/packages/srcinfo_cache.py
+++ 
b/src/kupferbootstrap/packages/srcinfo_cache.py @@ -8,16 +8,20 @@ import subprocess from typing import Any, ClassVar, Optional from kupferbootstrap.config.state import config -from kupferbootstrap.constants import MAKEPKG_CMD, SRCINFO_FILE, SRCINFO_METADATA_FILE, SRCINFO_INITIALISED_FILE +from kupferbootstrap.constants import ( + MAKEPKG_CMD, + SRCINFO_FILE, + SRCINFO_METADATA_FILE, + SRCINFO_INITIALISED_FILE, +) from kupferbootstrap.dictscheme import DictScheme from kupferbootstrap.exec.cmd import run_cmd from kupferbootstrap.utils import sha256sum -SRCINFO_CHECKSUM_FILES = ['PKGBUILD', SRCINFO_FILE] +SRCINFO_CHECKSUM_FILES = ["PKGBUILD", SRCINFO_FILE] class JsonFile(DictScheme): - _filename: ClassVar[str] _relative_path: str _strip_hidden: ClassVar[bool] = True @@ -28,19 +32,21 @@ class JsonFile(DictScheme): return json.dumps(self.toDict(), indent=2) def write(self): - 'Write the filtered json representation to disk' - filepath = os.path.join(config.get_path('pkgbuilds'), self._relative_path, self._filename) - logging.debug(f'{self._relative_path}: writing {self._filename}') - with open(filepath, 'w') as fd: + "Write the filtered json representation to disk" + filepath = os.path.join( + config.get_path("pkgbuilds"), self._relative_path, self._filename + ) + logging.debug(f"{self._relative_path}: writing {self._filename}") + with open(filepath, "w") as fd: fd.write(self.toJSON()) @classmethod def _read_file(cls, relative_path) -> Optional[dict]: - pkgdir = os.path.join(config.get_path('pkgbuilds'), relative_path) + pkgdir = os.path.join(config.get_path("pkgbuilds"), relative_path) filepath = os.path.join(pkgdir, cls._filename) if not os.path.exists(filepath): raise Exception(f"{relative_path}: {cls._filename} doesn't exist") - with open(filepath, 'r') as fd: + with open(filepath, "r") as fd: contents = json.load(fd) return contents @@ -53,7 +59,6 @@ class JsonFile(DictScheme): class SrcInitialisedFile(JsonFile): - PKGBUILD: str _filename: ClassVar[str] = SRCINFO_INITIALISED_FILE @@ -69,14 +74,13 @@ class SrcInitialisedFile(JsonFile): srcinfo_meta_defaults = { - 'build_mode': None, + "build_mode": None, "build_nodeps": None, "build_crossdirect": None, } class SrcinfoMetaFile(JsonFile): - checksums: dict[str, str] build_mode: Optional[str] build_nodeps: Optional[bool] @@ -87,37 +91,52 @@ class SrcinfoMetaFile(JsonFile): @staticmethod def parse_existing(relative_pkg_dir: str) -> SrcinfoMetaFile: - 'tries to parse the srcinfo_meta.json file in the specified pkgbuild dir' + "tries to parse the srcinfo_meta.json file in the specified pkgbuild dir" metadata_raw = SrcinfoMetaFile._read_file(relative_pkg_dir) - return SrcinfoMetaFile.fromDict(metadata_raw | { - '_relative_path': relative_pkg_dir, - '_changed': False, - }) + return SrcinfoMetaFile.fromDict( + metadata_raw + | { + "_relative_path": relative_pkg_dir, + "_changed": False, + } + ) @staticmethod - def generate_new(relative_pkg_dir: str, write: bool = True) -> tuple[SrcinfoMetaFile, list[str]]: - 'Creates a new SrcinfoMetaFile object with checksums, creating a SRCINFO as necessary' - s = SrcinfoMetaFile({ - '_relative_path': relative_pkg_dir, - '_changed': True, - 'checksums': {}, - **srcinfo_meta_defaults, - }) + def generate_new( + relative_pkg_dir: str, write: bool = True + ) -> tuple[SrcinfoMetaFile, list[str]]: + "Creates a new SrcinfoMetaFile object with checksums, creating a SRCINFO as necessary" + s = SrcinfoMetaFile( + { + "_relative_path": relative_pkg_dir, + "_changed": True, + "checksums": {}, + **srcinfo_meta_defaults, + } + 
)
         return s, s.refresh_all()

     @staticmethod
-    def handle_directory(relative_pkg_dir: str, force_refresh: bool = False, write: bool = True) -> tuple[SrcinfoMetaFile, list[str]]:
+    def handle_directory(
+        relative_pkg_dir: str, force_refresh: bool = False, write: bool = True
+    ) -> tuple[SrcinfoMetaFile, list[str]]:
         lines = None
         # try reading existing cache metadata
         try:
             metadata = SrcinfoMetaFile.parse_existing(relative_pkg_dir)
         except Exception as ex:
-            logging.debug(f"{relative_pkg_dir}: something went wrong parsing json from {SrcinfoMetaFile._filename},"
-                          f"running `makepkg --printsrcinfo` instead instead: {ex}")
+            logging.debug(
+                f"{relative_pkg_dir}: something went wrong parsing json from {SrcinfoMetaFile._filename}, "
+                f"running `makepkg --printsrcinfo` instead: {ex}"
+            )
             return SrcinfoMetaFile.generate_new(relative_pkg_dir, write=write)
         # if for whatever reason only the SRCINFO got deleted but PKGBUILD has not been modified,
         # we do want the checksum verification to work. So regenerate SRCINFO first.
-        if not os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_pkg_dir, SRCINFO_FILE)):
+        if not os.path.exists(
+            os.path.join(
+                config.get_path("pkgbuilds"), relative_pkg_dir, SRCINFO_FILE
+            )
+        ):
             lines = metadata.refresh_srcinfo()
         if not metadata.validate_checksums():
             # metadata is invalid
@@ -125,7 +144,9 @@ class SrcinfoMetaFile(JsonFile):
         # metadata is valid
         assert metadata
         if not force_refresh:
-            logging.debug(f'{metadata._relative_path}: srcinfo checksums match!')
+            logging.debug(
+                f"{metadata._relative_path}: srcinfo checksums match!"
+            )
         lines = lines or metadata.read_srcinfo_file()
         for build_field in srcinfo_meta_defaults.keys():
             if build_field not in metadata:
@@ -138,11 +159,16 @@ class SrcinfoMetaFile(JsonFile):
         return metadata, lines

     def refresh_checksums(self):
-        pkgdir = os.path.join(config.get_path('pkgbuilds'), self._relative_path)
-        if 'checksums' not in self:
-            self['checksums'] = None
+        pkgdir = os.path.join(
+            config.get_path("pkgbuilds"), self._relative_path
+        )
+        if "checksums" not in self:
+            self["checksums"] = None
         checksums_old = self.checksums.copy()
-        checksums = {p: sha256sum(os.path.join(pkgdir, p)) for p in SRCINFO_CHECKSUM_FILES}
+        checksums = {
+            p: sha256sum(os.path.join(pkgdir, p))
+            for p in SRCINFO_CHECKSUM_FILES
+        }
         if self.checksums is None:
             self.checksums = checksums
         else:
@@ -153,43 +179,57 @@

     def refresh_build_fields(self):
         self.update(srcinfo_meta_defaults)
-        with open(os.path.join(config.get_path('pkgbuilds'), self._relative_path, 'PKGBUILD'), 'r') as file:
-            lines = file.read().split('\n')
+        with open(
+            os.path.join(
+                config.get_path("pkgbuilds"), self._relative_path, "PKGBUILD"
+            ),
+            "r",
+        ) as file:
+            lines = file.read().split("\n")
         for line in lines:
-            if not line.startswith('_') or '=' not in line:
+            if not line.startswith("_") or "=" not in line:
                 continue
-            key, val = line.split('=', 1)
+            key, val = line.split("=", 1)
             val = val.strip("\"'")
-            if key == '_mode':
+            if key == "_mode":
                 self.build_mode = val
-            elif key == '_nodeps':
-                self.build_nodeps = val.lower() == 'true'
-            elif key == '_crossdirect':
-                self.build_crossdirect = val.lower() == 'true'
+            elif key == "_nodeps":
+                self.build_nodeps = val.lower() == "true"
+            elif key == "_crossdirect":
+                self.build_crossdirect = val.lower() == "true"
             else:
                 continue

     def refresh_srcinfo(self) -> list[str]:
-        'Run `makepkg --printsrcinfo` to create an updated SRCINFO file and return the lines from it'
+        "Run `makepkg --printsrcinfo` to create an updated SRCINFO file and return the lines from it"
         logging.info(f"{self._relative_path}: Generating SRCINFO with makepkg")
-        pkgdir = os.path.join(config.get_path('pkgbuilds'), self._relative_path)
+        pkgdir = os.path.join(
+            config.get_path("pkgbuilds"), self._relative_path
+        )
         srcinfo_file = os.path.join(pkgdir, SRCINFO_FILE)
         sproc = run_cmd(
-            MAKEPKG_CMD + ['--printsrcinfo'],
+            MAKEPKG_CMD + ["--printsrcinfo"],
             cwd=pkgdir,
             stdout=subprocess.PIPE,
         )
-        assert (isinstance(sproc, subprocess.CompletedProcess))
+        assert isinstance(sproc, subprocess.CompletedProcess)
         if sproc.returncode:
-            raise Exception(f"{self._relative_path}: makepkg failed to parse the PKGBUILD! Error code: {sproc.returncode}")
-        output = sproc.stdout.decode('utf-8')
-        with open(srcinfo_file, 'w') as srcinfo_fd:
+            raise Exception(
+                f"{self._relative_path}: makepkg failed to parse the PKGBUILD! Error code: {sproc.returncode}"
+            )
+        output = sproc.stdout.decode("utf-8")
+        with open(srcinfo_file, "w") as srcinfo_fd:
             srcinfo_fd.write(output)
-        return output.split('\n')
+        return output.split("\n")

     def read_srcinfo_file(self) -> list[str]:
-        with open(os.path.join(config.get_path('pkgbuilds'), self._relative_path, SRCINFO_FILE), 'r') as srcinfo_fd:
-            lines = srcinfo_fd.read().split('\n')
+        with open(
+            os.path.join(
+                config.get_path("pkgbuilds"), self._relative_path, SRCINFO_FILE
+            ),
+            "r",
+        ) as srcinfo_fd:
+            lines = srcinfo_fd.read().split("\n")
         return lines

     def refresh_all(self, write: bool = True) -> list[str]:
@@ -202,20 +242,28 @@ class SrcinfoMetaFile(JsonFile):

     def validate_checksums(self) -> bool:
         "Returns True if all checksummed files exist and checksums match"
-        pkgdir = os.path.join(config.get_path('pkgbuilds'), self._relative_path)
+        pkgdir = os.path.join(
+            config.get_path("pkgbuilds"), self._relative_path
+        )
         assert self.checksums
         for filename in SRCINFO_CHECKSUM_FILES:
             if filename not in self.checksums:
-                logging.debug(f"{self._relative_path}: No checksum for {filename} available")
+                logging.debug(
+                    f"{self._relative_path}: No checksum for {filename} available"
+                )
                 return False
             checksum = self.checksums[filename]
             path = os.path.join(pkgdir, filename)
             if not os.path.exists(path):
-                logging.debug(f"{self._relative_path}: can't checksum'{filename}: file doesn't exist")
+                logging.debug(
+                    f"{self._relative_path}: can't checksum '{filename}': file doesn't exist"
+                )
                 return False
             file_sum = sha256sum(path)
             if file_sum != checksum:
-                logging.debug(f'{self._relative_path}: Checksum for file "{filename}" doesn\'t match')
+                logging.debug(
+                    f'{self._relative_path}: Checksum for file "{filename}" doesn\'t match'
+                )
                 return False
         return True
@@ -223,18 +271,24 @@
         checksum = self.checksums["PKGBUILD"]
         assert checksum
         try:
-            initfile = SrcInitialisedFile(self._relative_path, raise_exception=True)
+            initfile = SrcInitialisedFile(
+                self._relative_path, raise_exception=True
+            )
             if "PKGBUILD" not in initfile:
                 raise Exception("'PKGBUILD' not in parser output")
             initialised_checksum = initfile.PKGBUILD
         except Exception as ex:
-            logging.debug(f"{self._relative_path}: Couldn't read or parse {SRCINFO_INITIALISED_FILE}: {ex}")
+            logging.debug(
+                f"{self._relative_path}: Couldn't read or parse {SRCINFO_INITIALISED_FILE}: {ex}"
+            )
             initialised_checksum = None
         result = checksum == initialised_checksum
         if initialised_checksum and not result:
-            logging.debug("Sources were set up for a different version. 
" - f"Current PKGBUILD checksum: {checksum}; " - f"Initialised for: {initialised_checksum}") + logging.debug( + "Sources were set up for a different version. " + f"Current PKGBUILD checksum: {checksum}; " + f"Initialised for: {initialised_checksum}" + ) return result def write_src_initialised(self): diff --git a/src/kupferbootstrap/progressbar.py b/src/kupferbootstrap/progressbar.py index 8899da8..8ac106d 100644 --- a/src/kupferbootstrap/progressbar.py +++ b/src/kupferbootstrap/progressbar.py @@ -12,14 +12,16 @@ DEFAULT_OUTPUT = sys.stderr managers: dict[Hashable, Manager] = {} progress_bars_option = click.option( - '--force-progress-bars/--no-progress-bars', + "--force-progress-bars/--no-progress-bars", is_flag=True, default=None, - help='Force enable/disable progress bars. Defaults to autodetection.', + help="Force enable/disable progress bars. Defaults to autodetection.", ) -def get_manager(file=DEFAULT_OUTPUT, enabled: Optional[bool] = None) -> Manager: +def get_manager( + file=DEFAULT_OUTPUT, enabled: Optional[bool] = None +) -> Manager: global managers m = managers.get(file, None) if not m: @@ -33,7 +35,9 @@ def get_manager(file=DEFAULT_OUTPUT, enabled: Optional[bool] = None) -> Manager: return m -def get_progress_bar(*kargs, file=DEFAULT_OUTPUT, leave=False, **kwargs) -> Counter: +def get_progress_bar( + *kargs, file=DEFAULT_OUTPUT, leave=False, **kwargs +) -> Counter: m = get_manager(file=file) kwargs["file"] = file @@ -42,11 +46,15 @@ def get_progress_bar(*kargs, file=DEFAULT_OUTPUT, leave=False, **kwargs) -> Coun def get_levels_bar(*kargs, file=DEFAULT_OUTPUT, enable_rate=True, **kwargs): - kwargs["fields"] = {"name": "None", "level": 1, "levels_total": 1} | (kwargs.get("fields", None) or {}) - f = (u'{desc}: {name}{desc_pad}{percentage:3.0f}%|{bar}| ' - u'{count:{len_total}d}/{total:d} ' - u'[lvl: {level}/{levels_total}] ') + kwargs["fields"] = {"name": "None", "level": 1, "levels_total": 1} | ( + kwargs.get("fields", None) or {} + ) + f = ( + "{desc}: {name}{desc_pad}{percentage:3.0f}%|{bar}| " + "{count:{len_total}d}/{total:d} " + "[lvl: {level}/{levels_total}] " + ) if enable_rate: - f += u'[{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]' + f += "[{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]" kwargs["bar_format"] = f return get_progress_bar(*kargs, **kwargs) diff --git a/src/kupferbootstrap/utils.py b/src/kupferbootstrap/utils.py index 1de8ca6..e7f0458 100644 --- a/src/kupferbootstrap/utils.py +++ b/src/kupferbootstrap/utils.py @@ -19,7 +19,9 @@ from .exec.cmd import run_cmd, run_root_cmd _programs_available = dict[str, bool]() -def programs_available(programs: Union[str, Sequence[str]], lazy: bool = True) -> bool: +def programs_available( + programs: Union[str, Sequence[str]], lazy: bool = True +) -> bool: global _programs_available if type(programs) is str: programs = [programs] @@ -35,24 +37,32 @@ def programs_available(programs: Union[str, Sequence[str]], lazy: bool = True) - def umount(dest: str, lazy=False) -> subprocess.CompletedProcess: return run_root_cmd( [ - 'umount', - '-c' + ('l' if lazy else ''), + "umount", + "-c" + ("l" if lazy else ""), dest, ], capture_output=True, ) -def mount(src: str, dest: str, options: list[str] = ['bind'], fs_type: Optional[str] = None, register_unmount=True) -> subprocess.CompletedProcess: +def mount( + src: str, + dest: str, + options: list[str] = ["bind"], + fs_type: Optional[str] = None, + register_unmount=True, +) -> subprocess.CompletedProcess: opts = [] for opt in options: - opts += ['-o', opt] + opts += ["-o", opt] if 
fs_type: - opts += ['-t', fs_type] + opts += ["-t", fs_type] result = run_root_cmd( - ['mount'] + opts + [ + ["mount"] + + opts + + [ src, dest, ], @@ -66,10 +76,10 @@ def mount(src: str, dest: str, options: list[str] = ['bind'], fs_type: Optional[ def check_findmnt(path: str) -> subprocess.CompletedProcess: result = run_root_cmd( [ - 'findmnt', - '-n', - '-o', - 'source', + "findmnt", + "-n", + "-o", + "source", path, ], capture_output=True, @@ -81,24 +91,42 @@ def git( cmd: list[str], dir: Optional[str] = None, use_git_dir: bool = False, - git_dir: str = './.git', + git_dir: str = "./.git", capture_output=False, user: Optional[str] = None, ) -> subprocess.CompletedProcess: - dirarg = [f'--git-dir={git_dir}'] if use_git_dir else [] - result = run_cmd(['git', *dirarg] + cmd, cwd=dir, capture_output=capture_output, switch_user=user) + dirarg = [f"--git-dir={git_dir}"] if use_git_dir else [] + result = run_cmd( + ["git", *dirarg] + cmd, + cwd=dir, + capture_output=capture_output, + switch_user=user, + ) assert isinstance(result, subprocess.CompletedProcess) return result -def git_get_branch(path, use_git_dir: bool = True, git_dir='./.git') -> str: - result = git(['branch', '--show-current'], dir=path, use_git_dir=True, git_dir=git_dir, capture_output=True) +def git_get_branch(path, use_git_dir: bool = True, git_dir="./.git") -> str: + result = git( + ["branch", "--show-current"], + dir=path, + use_git_dir=True, + git_dir=git_dir, + capture_output=True, + ) if result.returncode: - raise Exception(f'Error getting git branch for {path}: {result.stderr}') + raise Exception( + f"Error getting git branch for {path}: {result.stderr}" + ) return result.stdout.decode().strip() -def log_or_exception(raise_exception: bool, msg: str, exc_class=Exception, log_level=logging.WARNING): +def log_or_exception( + raise_exception: bool, + msg: str, + exc_class=Exception, + log_level=logging.WARNING, +): if raise_exception: raise exc_class(msg) else: @@ -129,7 +157,9 @@ def get_gid(group: Union[int, str]) -> int: return grp.getgrnam(group).gr_gid -def read_files_from_tar(tar_file: str, files: Sequence[str]) -> Generator[tuple[str, IO], None, None]: +def read_files_from_tar( + tar_file: str, files: Sequence[str] +) -> Generator[tuple[str, IO], None, None]: assert os.path.exists(tar_file) with tarfile.open(tar_file) as index: for path in files: @@ -144,25 +174,38 @@ def download_file(path: str, url: str, update: bool = True): if os.path.exists(path) and update: headers = requests.head(url).headers file_size = os.path.getsize(path) - missing = [i for i in ['Content-Length', 'last-modified'] if i not in headers] + missing = [ + i for i in ["Content-Length", "last-modified"] if i not in headers + ] if missing: logging.debug(f"Headers not specified: {missing}") - if 'Content-Length' in headers and int(headers['Content-Length']) != file_size: - logging.debug(f"{path} size differs: local: {file_size}, http: {headers['Content-Length']}") - elif 'last-modified' in headers: - url_time = parsedate(headers['last-modified']).astimezone() - file_time = datetime.datetime.fromtimestamp(os.path.getmtime(path)).astimezone() + if ( + "Content-Length" in headers + and int(headers["Content-Length"]) != file_size + ): + logging.debug( + f"{path} size differs: local: {file_size}, http: {headers['Content-Length']}" + ) + elif "last-modified" in headers: + url_time = parsedate(headers["last-modified"]).astimezone() + file_time = datetime.datetime.fromtimestamp( + os.path.getmtime(path) + ).astimezone() if url_time == file_time: 
logging.debug(f"{path} seems already up to date") return False - user_agent = {"User-agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0"} + user_agent = { + "User-agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0" + } download = requests.get(url, headers=user_agent) - with open(path, 'wb') as fd: + with open(path, "wb") as fd: for chunk in download.iter_content(4096): fd.write(chunk) - if 'last-modified' in download.headers: - url_time = parsedate(download.headers['last-modified']).astimezone() - os.utime(path, (datetime.datetime.now().timestamp(), url_time.timestamp())) + if "last-modified" in download.headers: + url_time = parsedate(download.headers["last-modified"]).astimezone() + os.utime( + path, (datetime.datetime.now().timestamp(), url_time.timestamp()) + ) logging.debug(f"{path} downloaded!") return True @@ -172,19 +215,25 @@ def sha256sum(filename): h = hashlib.sha256() b = bytearray(128 * 1024) mv = memoryview(b) - with open(filename, 'rb', buffering=0) as f: + with open(filename, "rb", buffering=0) as f: while n := f.readinto(mv): h.update(mv[:n]) return h.hexdigest() -def ellipsize(s: str, length: int = 25, padding: Optional[str] = None, ellipsis: str = '...', rjust: bool = False): +def ellipsize( + s: str, + length: int = 25, + padding: Optional[str] = None, + ellipsis: str = "...", + rjust: bool = False, +): """ Ellipsize `s`, shortening it to `(length - len(ellipsis))` and appending `ellipsis` if `s` is longer than `length`. If `padding` is non-empty and `s` is shorter than length, `s` is padded with `padding` until it's `length` long. """ if len(s) > length: - return s[:length - len(ellipsis)] + ellipsis + return s[: length - len(ellipsis)] + ellipsis if not padding: return s pad = s.rjust if rjust else s.ljust @@ -217,13 +266,12 @@ def color_mark_selected( inherited_from: Optional[str] = None, msg_fmt: str = 'Currently selected by profile "%s"%s', msg_item_colors: dict[str, Any] = dict(bold=True, fg="bright_green"), - marker: str = '>>> ', + marker: str = ">>> ", marker_config: dict[str, Any] = dict(bold=True, fg="bright_green"), - split_on: str = '\n', - suffix: str = '\n\n', + split_on: str = "\n", + suffix: str = "\n\n", use_colors: Optional[bool] = None, ) -> str: - def bold(s: str, _bold=True, **kwargs): return color_bold(s, use_colors=use_colors, **kwargs) @@ -232,12 +280,17 @@ def color_mark_selected( marker_full = color_str(marker, use_colors=use_colors, **marker_config) - msg_items = [color_str(profile_name, use_colors=use_colors, **msg_item_colors), ''] + msg_items = [ + color_str(profile_name, use_colors=use_colors, **msg_item_colors), + "", + ] if inherited_from and inherited_from != profile_name: - msg_items[1] = ''.join([ - bold(' (inherited from profile "'), - green(inherited_from, bold=True), - bold('")'), - ]) - output = f'{item}{suffix}{msg_fmt % tuple(msg_items)}' - return '\n'.join([(marker_full + o) for o in output.split(split_on)]) + msg_items[1] = "".join( + [ + bold(' (inherited from profile "'), + green(inherited_from, bold=True), + bold('")'), + ] + ) + output = f"{item}{suffix}{msg_fmt % tuple(msg_items)}" + return "\n".join([(marker_full + o) for o in output.split(split_on)]) diff --git a/src/kupferbootstrap/version/cli.py b/src/kupferbootstrap/version/cli.py index 474a6fd..c40006b 100644 --- a/src/kupferbootstrap/version/cli.py +++ b/src/kupferbootstrap/version/cli.py @@ -6,8 +6,12 @@ from kupferbootstrap.distro.repo_config import get_repo_config from .kbs import 
get_kbs_version, compare_kbs_version, compare_kbs_ci_version -def _check_kbs_version(*, init_pkgbuilds: bool = False, ci_mode: bool = False): # quiet helper for other modules - repo_config, repo_config_found = get_repo_config(initialize_pkgbuilds=init_pkgbuilds) +def _check_kbs_version( + *, init_pkgbuilds: bool = False, ci_mode: bool = False +): # quiet helper for other modules + repo_config, repo_config_found = get_repo_config( + initialize_pkgbuilds=init_pkgbuilds + ) if not repo_config_found: return kbs_version = get_kbs_version() @@ -39,7 +43,13 @@ def cmd_version_show(): @cmd_version.command("check") -@click.option("--ci-mode", "--ci", is_flag=True, default=False, help="Compare local version against required Build-CI version") +@click.option( + "--ci-mode", + "--ci", + is_flag=True, + default=False, + help="Compare local version against required Build-CI version", +) def cmd_version_check(ci_mode: bool = False): """ Compare KBS version against minimum version from PKGBUILDs @@ -56,11 +66,15 @@ def cmd_version_check(ci_mode: bool = False): exit(2) repo_config, file_found = get_repo_config(initialize_pkgbuilds=False) if not file_found: - logging.error(f"{REPOS_CONFIG_FILE} not found in PKGBUILDs, can't check KBS version for compatibility") + logging.error( + f"{REPOS_CONFIG_FILE} not found in PKGBUILDs, can't check KBS version for compatibility" + ) exit(2) res = compare_kbs_version(kbs_version=kbs_version, repo_config=repo_config) if ci_mode: - res_ci = compare_kbs_ci_version(kbs_version=kbs_version, repo_config=repo_config) + res_ci = compare_kbs_ci_version( + kbs_version=kbs_version, repo_config=repo_config + ) if res_ci is None: exit(2) if res_ci: @@ -68,5 +82,7 @@ def cmd_version_check(ci_mode: bool = False): if res is None: exit(2) if res: - logging.info(f"{'Success: ' if res_ci else ''}KBS version {kbs_version!r} is new enough for PKGBUILDs!") + logging.info( + f"{'Success: ' if res_ci else ''}KBS version {kbs_version!r} is new enough for PKGBUILDs!" + ) exit(0 if res and res_ci else 1) diff --git a/src/kupferbootstrap/version/kbs.py b/src/kupferbootstrap/version/kbs.py index 0a42ca3..7705b77 100644 --- a/src/kupferbootstrap/version/kbs.py +++ b/src/kupferbootstrap/version/kbs.py @@ -10,7 +10,7 @@ from .compare import semver_compare, VerComp KBS_VERSION: Union[str, None] = None KBS_VERSION_MIN_KEY = "kbs_min_version" -KBS_VERSION_CI_MIN_KEY = 'kbs_ci_version' +KBS_VERSION_CI_MIN_KEY = "kbs_ci_version" def get_kbs_version(kbs_folder: Union[str, None] = None) -> Union[str, None]: @@ -20,7 +20,7 @@ def get_kbs_version(kbs_folder: Union[str, None] = None) -> Union[str, None]: kbs_folder = os.path.join(os.path.dirname(__file__), "../" * 3) try: res = git( - ['describe', '--tags', '--match', 'v*.*.*'], + ["describe", "--tags", "--match", "v*.*.*"], use_git_dir=True, git_dir=os.path.join(kbs_folder, ".git"), capture_output=True, @@ -29,8 +29,11 @@ def get_kbs_version(kbs_folder: Union[str, None] = None) -> Union[str, None]: output = res.stderr or res.stdout if output and not isinstance(output, str): output = output.decode().strip() - raise Exception(output or f'[Git failed without output. Return Code: {res.returncode}]') - return (res.stdout or b'').decode().strip() + raise Exception( + output + or f"[Git failed without output. 
Return Code: {res.returncode}]" + ) + return (res.stdout or b"").decode().strip() except Exception as ex: logging.warning(f"Failed to fetch KBS version with git: {ex!s}") return None @@ -45,35 +48,53 @@ def compare_kbs_version_generic( try: return semver_compare(minimum_ver.lstrip("v"), kbs_version.lstrip("v")) except Exception as ex: - logging.warning(f'Failed to compare KBS version {kbs_version!r} to required minimum version {minimum_ver!r}: {ex!r}') + logging.warning( + f"Failed to compare KBS version {kbs_version!r} to required minimum version {minimum_ver!r}: {ex!r}" + ) return None -def compare_kbs_version(kbs_version: str, repo_config: ReposConfigFile) -> bool | None: +def compare_kbs_version( + kbs_version: str, repo_config: ReposConfigFile +) -> bool | None: """Returns True if KBS is new enough for PKGBUILDs""" minimum_ver = repo_config.get(KBS_VERSION_MIN_KEY) - kbs_state = compare_kbs_version_generic(kbs_version=kbs_version, minimum_ver=minimum_ver) + kbs_state = compare_kbs_version_generic( + kbs_version=kbs_version, minimum_ver=minimum_ver + ) if not minimum_ver: - logging.warning(f"Can't check PKGBUILDs for compatible KBS version as {KBS_VERSION_MIN_KEY!r} " - 'is empty in PKGBUILDs repos.yml') + logging.warning( + f"Can't check PKGBUILDs for compatible KBS version as {KBS_VERSION_MIN_KEY!r} " + "is empty in PKGBUILDs repos.yml" + ) return None if kbs_state == VerComp.RIGHT_OLDER: - logging.warning(f'KBS version {kbs_version!r} is older than {minimum_ver!r} required by PKGBUILDs.\n' - 'Some functionality may randomly be broken.\nYou have been warned.') + logging.warning( + f"KBS version {kbs_version!r} is older than {minimum_ver!r} required by PKGBUILDs.\n" + "Some functionality may randomly be broken.\nYou have been warned." + ) return False return True -def compare_kbs_ci_version(kbs_version: str, repo_config: ReposConfigFile) -> bool | None: +def compare_kbs_ci_version( + kbs_version: str, repo_config: ReposConfigFile +) -> bool | None: """Returns True if KBS is new enough for PKGBUILDs in CI""" minimum_ver = repo_config.get(KBS_VERSION_CI_MIN_KEY) if not minimum_ver: - logging.warning("Can't check PKGBUILDs for compatible KBS CI version: " - f'Minimum CI KBS version {KBS_VERSION_CI_MIN_KEY!r} is empty in PKGBUILDs repos.yml!') + logging.warning( + "Can't check PKGBUILDs for compatible KBS CI version: " + f"Minimum CI KBS version {KBS_VERSION_CI_MIN_KEY!r} is empty in PKGBUILDs repos.yml!" + ) return None - kbs_state = compare_kbs_version_generic(kbs_version=kbs_version, minimum_ver=minimum_ver) + kbs_state = compare_kbs_version_generic( + kbs_version=kbs_version, minimum_ver=minimum_ver + ) if kbs_state == VerComp.RIGHT_OLDER: - logging.error(f'KBS CI version {kbs_version!r} is older than {minimum_ver!r} required by PKGBUILDs kbs_ci_version!\n' - 'CI is likely to fail!') + logging.error( + f"KBS CI version {kbs_version!r} is older than {minimum_ver!r} required by PKGBUILDs kbs_ci_version!\n" + "CI is likely to fail!" 
+ ) return False return True diff --git a/src/kupferbootstrap/wrapper/__init__.py b/src/kupferbootstrap/wrapper/__init__.py index 1a7156e..2731e74 100644 --- a/src/kupferbootstrap/wrapper/__init__.py +++ b/src/kupferbootstrap/wrapper/__init__.py @@ -10,7 +10,7 @@ from .docker import DockerWrapper from .wrapper import Wrapper wrapper_impls: dict[str, Wrapper] = { - 'docker': DockerWrapper(), + "docker": DockerWrapper(), } @@ -24,30 +24,38 @@ def get_wrapper_impl(wrapper_type: Optional[str] = None) -> Wrapper: def wrap(wrapper_type: Optional[str] = None): wrapper_type = get_wrapper_type(wrapper_type) - if wrapper_type != 'none': + if wrapper_type != "none": get_wrapper_impl(wrapper_type).wrap() def is_wrapped(wrapper_type: Optional[str] = None) -> bool: wrapper_type = get_wrapper_type(wrapper_type) - return wrapper_type != 'none' and get_wrapper_impl(wrapper_type).is_wrapped() + return ( + wrapper_type != "none" and get_wrapper_impl(wrapper_type).is_wrapped() + ) def needs_wrap(wrapper_type: Optional[str] = None) -> bool: wrapper_type = wrapper_type or get_wrapper_type() - return wrapper_type != 'none' and not is_wrapped(wrapper_type) and not config.runtime.no_wrap + return ( + wrapper_type != "none" + and not is_wrapped(wrapper_type) + and not config.runtime.no_wrap + ) def enforce_wrap(no_wrapper=False): wrapper_type = get_wrapper_type() if needs_wrap(wrapper_type) and not no_wrapper: - logging.info(f'Wrapping in {wrapper_type}') + logging.info(f"Wrapping in {wrapper_type}") wrap() def check_programs_wrap(programs: Union[str, Sequence[str]]): if not programs_available(programs): - logging.debug(f"Wrapping because one of {[programs] if isinstance(programs, str) else programs} isn't available.") + logging.debug( + f"Wrapping because one of {[programs] if isinstance(programs, str) else programs} isn't available." + ) enforce_wrap() @@ -56,7 +64,9 @@ def wrap_if_foreign_arch(arch: Arch): enforce_wrap() -def execute_without_exit(f, argv_override: Optional[list[str]], *args, **kwargs): +def execute_without_exit( + f, argv_override: Optional[list[str]], *args, **kwargs +): """ If no wrap is needed, executes and returns `f(*args, **kwargs)`. If a wrap is determined to be necessary, force a wrap with argv_override applied. @@ -65,7 +75,9 @@ def execute_without_exit(f, argv_override: Optional[list[str]], *args, **kwargs) """ if not needs_wrap(): return f(*args, **kwargs) - assert get_wrapper_type() != 'none', "needs_wrap() should've returned False" + assert get_wrapper_type() != "none", ( + "needs_wrap() should've returned False" + ) w = get_wrapper_impl() w_cmd = w.argv_override # we need to avoid throwing and catching SystemExit due to FDs getting closed otherwise @@ -79,10 +91,10 @@ def execute_without_exit(f, argv_override: Optional[list[str]], *args, **kwargs) nowrapper_option = click.option( - '-w/-W', - '--force-wrapper/--no-wrapper', - 'wrapper_override', + "-w/-W", + "--force-wrapper/--no-wrapper", + "wrapper_override", is_flag=True, default=None, - help='Force or disable the docker wrapper. Defaults to autodetection.', + help="Force or disable the docker wrapper. 
Defaults to autodetection.", ) diff --git a/src/kupferbootstrap/wrapper/docker.py b/src/kupferbootstrap/wrapper/docker.py index 4976a14..4a89d3a 100644 --- a/src/kupferbootstrap/wrapper/docker.py +++ b/src/kupferbootstrap/wrapper/docker.py @@ -17,12 +17,12 @@ DOCKER_PATHS = WRAPPER_PATHS.copy() def docker_volumes_args(volume_mappings: dict[str, str]) -> list[str]: result = [] for source, destination in volume_mappings.items(): - result += ['-v', f'{source}:{destination}:z'] + result += ["-v", f"{source}:{destination}:z"] return result class DockerWrapper(Wrapper): - type: str = 'docker' + type: str = "docker" def wrap(self): super().wrap() @@ -40,43 +40,59 @@ class DockerWrapper(Wrapper): _path = os.path.join(_par_dir, "../../../..") docker_path = os.path.realpath(_path) tried.append(f"{_path} => {docker_path}") - logging.debug(f"{DOCKER_FILE!r} not found at {script_path!r}, trying {docker_path!r}") - version_file = os.path.join(script_path, '../..', VERSION_FILE) + logging.debug( + f"{DOCKER_FILE!r} not found at {script_path!r}, trying {docker_path!r}" + ) + version_file = os.path.join(script_path, "../..", VERSION_FILE) if not os.path.exists(version_file): _vfile = os.path.join(docker_path, VERSION_FILE) - logging.warning(f"{VERSION_FILE} not found at {version_file!r}." - f"\nTrying {_vfile!r}" - "\nDid you use `pip install .` instead of `pip install -e .`?") + logging.warning( + f"{VERSION_FILE} not found at {version_file!r}." + f"\nTrying {_vfile!r}" + "\nDid you use `pip install .` instead of `pip install -e .`?" + ) if os.path.exists(_vfile): version_file = _vfile if os.path.exists(version_file): with open(version_file) as fd: - version = fd.read().replace('\n', '').strip() + version = fd.read().replace("\n", "").strip() logging.debug(f"Read docker tag {version} from {version_file}") else: version = "BUILD" - logging.error(f"'{script_path}/{VERSION_FILE}' doesn't exist, defaulting docker tag to {version}!" - "\nThis installation is potentially broken!" - "\nDid you use `pip install .` instead of `pip install -e .` to install kupferboostrap?" - f"Tried locations: {[version_file, _vfile]}") - tag = f'registry.gitlab.com/kupfer/kupferbootstrap:{version}' - if version == 'BUILD': + logging.error( + f"'{script_path}/{VERSION_FILE}' doesn't exist, defaulting docker tag to {version}!" + "\nThis installation is potentially broken!" + "\nDid you use `pip install .` instead of `pip install -e .` to install kupferboostrap?" + f"Tried locations: {[version_file, _vfile]}" + ) + tag = f"registry.gitlab.com/kupfer/kupferbootstrap:{version}" + if version == "BUILD": logging.info(f'Building docker image "{tag}"') cmd = [ - 'docker', - 'build', - '--pull', - '--network', - 'host', - '.', - '-t', + "docker", + "build", + "--pull", + "--network", + "host", + ".", + "-t", tag, - ] + (['-q'] if not config.runtime.verbose else []) + ] + (["-q"] if not config.runtime.verbose else []) _dfile = os.path.join(docker_path, DOCKER_FILE) if not os.path.exists(_dfile): _sep = "\n -" - raise Exception(f'{DOCKER_FILE!r} not found. Tried locations:' + (_sep.join(["", *[repr(f"{p}/{DOCKER_FILE}") for p in tried]]))) - logging.debug(f'Running docker cmd (chdir={script_path!r}) : ' + ' '.join(cmd)) + raise Exception( + f"{DOCKER_FILE!r} not found. 
Tried locations:" + + ( + _sep.join( + ["", *[repr(f"{p}/{DOCKER_FILE}") for p in tried]] + ) + ) + ) + logging.debug( + f"Running docker cmd (chdir={script_path!r}) : " + + " ".join(cmd) + ) mute_docker = not config.runtime.verbose result = subprocess.run( cmd, @@ -84,66 +100,93 @@ class DockerWrapper(Wrapper): capture_output=mute_docker, ) if result.returncode != 0: - error_msg = ('\n' + result.stderr.decode() + '\n') if mute_docker else '' - logging.fatal(f'Docker error: {error_msg}Failed to build docker image: see errors above: ^^^^') + error_msg = ( + ("\n" + result.stderr.decode() + "\n") + if mute_docker + else "" + ) + logging.fatal( + f"Docker error: {error_msg}Failed to build docker image: see errors above: ^^^^" + ) exit(1) else: # Check if the image for the version already exists result = subprocess.run( [ - 'docker', - 'images', - '-q', + "docker", + "images", + "-q", tag, ], capture_output=True, ) - if result.stdout == b'': - logging.info(f'Pulling kupferbootstrap docker image version \'{version}\'') - subprocess.run([ - 'docker', - 'pull', - tag, - ]) - container_name = f'kupferbootstrap-{self.uuid}' + if result.stdout == b"": + logging.info( + f"Pulling kupferbootstrap docker image version '{version}'" + ) + subprocess.run( + [ + "docker", + "pull", + tag, + ] + ) + container_name = f"kupferbootstrap-{self.uuid}" wrapped_config = self.generate_wrapper_config() - target_user = 'root' if config.runtime.uid == 0 else 'kupfer' - target_home = '/root' if target_user == 'root' else f'/home/{target_user}' + target_user = "root" if config.runtime.uid == 0 else "kupfer" + target_home = ( + "/root" if target_user == "root" else f"/home/{target_user}" + ) - ssh_dir = os.path.join(pathlib.Path.home(), '.ssh') + ssh_dir = os.path.join(pathlib.Path.home(), ".ssh") if not os.path.exists(ssh_dir): os.makedirs(ssh_dir, mode=0o700) - volumes = self.get_bind_mounts_default(wrapped_config, ssh_dir=ssh_dir, target_home=target_home) + volumes = self.get_bind_mounts_default( + wrapped_config, ssh_dir=ssh_dir, target_home=target_home + ) for vol_name, vol_dest in DOCKER_PATHS.items(): vol_src = config.get_path(vol_name) makedir(vol_src) volumes[vol_src] = vol_dest - docker_cmd = [ - 'docker', - 'run', - '--net', - 'host', - '--name', - container_name, - '--rm', - '--interactive', - '--tty', - '--privileged', - ] + docker_volumes_args(volumes) + [tag] + docker_cmd = ( + [ + "docker", + "run", + "--net", + "host", + "--name", + container_name, + "--rm", + "--interactive", + "--tty", + "--privileged", + ] + + docker_volumes_args(volumes) + + [tag] + ) kupfer_cmd = [ - 'kupferbootstrap', - '--config', + "kupferbootstrap", + "--config", volumes[wrapped_config], ] - kupfer_cmd += self.argv_override or self.filter_args_wrapper(sys.argv[1:]) + kupfer_cmd += self.argv_override or self.filter_args_wrapper( + sys.argv[1:] + ) if config.runtime.uid: - kupfer_cmd = ['wrapper_su_helper', '--uid', str(config.runtime.uid), '--username', 'kupfer', '--'] + kupfer_cmd + kupfer_cmd = [ + "wrapper_su_helper", + "--uid", + str(config.runtime.uid), + "--username", + "kupfer", + "--", + ] + kupfer_cmd cmd = docker_cmd + kupfer_cmd - logging.debug('Wrapping in docker:' + repr(cmd)) + logging.debug("Wrapping in docker:" + repr(cmd)) result = subprocess.run(cmd) if self.should_exit: exit(result.returncode) @@ -152,8 +195,8 @@ class DockerWrapper(Wrapper): def stop(self): subprocess.run( [ - 'docker', - 'kill', + "docker", + "kill", self.identifier, ], stdout=subprocess.DEVNULL, diff --git 
a/src/kupferbootstrap/wrapper/su_helper.py b/src/kupferbootstrap/wrapper/su_helper.py index d82877b..beb36e4 100644 --- a/src/kupferbootstrap/wrapper/su_helper.py +++ b/src/kupferbootstrap/wrapper/su_helper.py @@ -9,28 +9,49 @@ from kupferbootstrap.exec.cmd import run_cmd, flatten_shell_script from kupferbootstrap.exec.file import chown -@click.command('kupferbootstrap_su') -@click.option('--username', default='kupfer', help="The user's name. If --uid is provided, the user's uid will be changed to this in passwd") -@click.option('--uid', default=1000, type=int, help='uid to change $username to and run as') -@click.argument('cmd', type=str, nargs=-1) -def kupferbootstrap_su(cmd: list[str], uid: int = 1000, username: str = 'kupfer'): +@click.command("kupferbootstrap_su") +@click.option( + "--username", + default="kupfer", + help="The user's name. If --uid is provided, the user's uid will be changed to this in passwd", +) +@click.option( + "--uid", + default=1000, + type=int, + help="uid to change $username to and run as", +) +@click.argument("cmd", type=str, nargs=-1) +def kupferbootstrap_su( + cmd: list[str], uid: int = 1000, username: str = "kupfer" +): "Changes `username`'s uid to `uid` and executes kupferbootstrap as that user" cmd = list(cmd) user = pwd.getpwnam(username) home = user.pw_dir if uid != user.pw_uid: - run_cmd(['usermod', '-o', '-u', str(uid), username]).check_returncode() # type: ignore[union-attr] + run_cmd(["usermod", "-o", "-u", str(uid), username]).check_returncode() # type: ignore[union-attr] chown(home, username, recursive=False) - logging.debug(f'wrapper_su_helper: running {cmd} as {repr(username)}') - env_inject = ['env', f'PATH={os.environ["PATH"]}'] + logging.debug(f"wrapper_su_helper: running {cmd} as {repr(username)}") + env_inject = ["env", f"PATH={os.environ['PATH']}"] if WRAPPER_ENV_VAR in os.environ: - env_inject.append(f'{WRAPPER_ENV_VAR}={os.environ[WRAPPER_ENV_VAR]}') - su_cmd = ['sudo', *env_inject, 'su', '-P', username, '-c', flatten_shell_script(cmd, wrap_in_shell_quote=True, shell_quote_items=True)] + env_inject.append(f"{WRAPPER_ENV_VAR}={os.environ[WRAPPER_ENV_VAR]}") + su_cmd = [ + "sudo", + *env_inject, + "su", + "-P", + username, + "-c", + flatten_shell_script( + cmd, wrap_in_shell_quote=True, shell_quote_items=True + ), + ] result = run_cmd(su_cmd, attach_tty=True) assert isinstance(result, int) exit(result) -if __name__ == '__main__': +if __name__ == "__main__": setup_logging(True) - kupferbootstrap_su(prog_name='kupferbootstrap_su_helper') + kupferbootstrap_su(prog_name="kupferbootstrap_su_helper") diff --git a/src/kupferbootstrap/wrapper/wrapper.py b/src/kupferbootstrap/wrapper/wrapper.py index 5f40a61..ce71bb5 100644 --- a/src/kupferbootstrap/wrapper/wrapper.py +++ b/src/kupferbootstrap/wrapper/wrapper.py @@ -10,8 +10,8 @@ from kupferbootstrap.config.state import dump_file as dump_config_file from kupferbootstrap.constants import CHROOT_PATHS, WRAPPER_ENV_VAR WRAPPER_PATHS = CHROOT_PATHS | { - 'ccache': '/ccache', - 'rust': '/rust', + "ccache": "/ccache", + "rust": "/rust", } @@ -40,9 +40,11 @@ class Wrapper(WrapperProtocol): should_exit: bool atexit_registered: bool - def __init__(self, random_id: Optional[str] = None, name: Optional[str] = None): + def __init__( + self, random_id: Optional[str] = None, name: Optional[str] = None + ): self.uuid = str(random_id or uuid.uuid4()) - self.identifier = name or f'kupferbootstrap-{self.uuid}' + self.identifier = name or f"kupferbootstrap-{self.uuid}" self.argv_override = None self.should_exit = 
True self.atexit_registered = False @@ -55,18 +57,20 @@ class Wrapper(WrapperProtocol): for i, arg in enumerate(args): if done: break - if arg[0] != '-': + if arg[0] != "-": results += args[i:] done = True break - for argname in ['--config', '-C']: + for argname in ["--config", "-C"]: if arg.startswith(argname): done = True - if arg.strip() != argname: # arg is longer, assume --arg=value + if ( + arg.strip() != argname + ): # arg is longer, assume --arg=value offset = 1 else: offset = 2 - results += args[i + offset:] + results += args[i + offset :] break if not done: results.append(arg) @@ -74,17 +78,23 @@ class Wrapper(WrapperProtocol): def generate_wrapper_config( self, - target_path: str = '/tmp/kupferbootstrap', + target_path: str = "/tmp/kupferbootstrap", paths: dict[str, str] = WRAPPER_PATHS, config_overrides: dict[str, dict] = {}, ) -> str: - wrapped_config = f'{target_path.rstrip("/")}/{self.identifier}_wrapped.toml' + wrapped_config = ( + f"{target_path.rstrip('/')}/{self.identifier}_wrapped.toml" + ) dump_config_file( file_path=wrapped_config, - config=(config.file | { - 'paths': paths, - } | config_overrides), + config=( + config.file + | { + "paths": paths, + } + | config_overrides + ), ) self.wrapped_config_path = wrapped_config return wrapped_config @@ -106,16 +116,21 @@ class Wrapper(WrapperProtocol): def is_wrapped(self): return os.getenv(WRAPPER_ENV_VAR) == self.type.upper() - def get_bind_mounts_default(self, wrapped_config_path: Optional[str] = None, ssh_dir: Optional[str] = None, target_home: str = '/root'): + def get_bind_mounts_default( + self, + wrapped_config_path: Optional[str] = None, + ssh_dir: Optional[str] = None, + target_home: str = "/root", + ): wrapped_config_path = wrapped_config_path or self.wrapped_config_path - ssh_dir = ssh_dir or os.path.join(pathlib.Path.home(), '.ssh') - assert (wrapped_config_path) + ssh_dir = ssh_dir or os.path.join(pathlib.Path.home(), ".ssh") + assert wrapped_config_path mounts = { - '/dev': '/dev', - wrapped_config_path: f'{target_home}/.config/kupfer/kupferbootstrap.toml', + "/dev": "/dev", + wrapped_config_path: f"{target_home}/.config/kupfer/kupferbootstrap.toml", } if ssh_dir: mounts |= { - ssh_dir: f'{target_home}/.ssh', + ssh_dir: f"{target_home}/.ssh", } return mounts diff --git a/tests/config/test_config.py b/tests/config/test_config.py index 373aace..a23ee1f 100644 --- a/tests/config/test_config.py +++ b/tests/config/test_config.py @@ -13,7 +13,7 @@ from kupferbootstrap.config.state import CONFIG_DEFAULTS, ConfigStateHolder def get_filename(): - return mktemp() + '_pytest.toml' + return mktemp() + "_pytest.toml" @pytest.fixture @@ -25,8 +25,8 @@ def conf_filename(): @pytest.fixture def empty_config(): f = get_filename() - with open(f, 'w') as fd: - fd.write('') + with open(f, "w") as fd: + fd.write("") yield f os.unlink(f) @@ -41,7 +41,9 @@ def configstate_emptyfile(empty_config): return ConfigStateHolder(empty_config) -def validate_ConfigStateHolder(c: ConfigStateHolder, should_load: Optional[bool] = None): +def validate_ConfigStateHolder( + c: ConfigStateHolder, should_load: Optional[bool] = None +): assert isinstance(c, ConfigStateHolder) if should_load is not None: assert c.file_state.load_finished is True @@ -49,10 +51,13 @@ def validate_ConfigStateHolder(c: ConfigStateHolder, should_load: Optional[bool] assert c.file -@pytest.mark.parametrize('conf_fixture,exists', [('configstate_emptyfile', True), ('configstate_nonexistant', False)]) +@pytest.mark.parametrize( + "conf_fixture,exists", + 
[("configstate_emptyfile", True), ("configstate_nonexistant", False)], +) def test_fixture_configstate(conf_fixture: str, exists: bool, request): configstate = request.getfixturevalue(conf_fixture) - assert 'config_file' in configstate.runtime + assert "config_file" in configstate.runtime confpath = configstate.runtime.config_file assert isinstance(confpath, str) assert confpath @@ -68,8 +73,13 @@ def test_config_load_nonexistant(configstate_nonexistant): validate_ConfigStateHolder(configstate_nonexistant, should_load=False) -@pytest.mark.parametrize('path_fixture,should_load', [('conf_filename', False), ('empty_config', True)]) -def test_loadstate_is_loaded(path_fixture: str, should_load: bool, request: pytest.FixtureRequest): +@pytest.mark.parametrize( + "path_fixture,should_load", + [("conf_filename", False), ("empty_config", True)], +) +def test_loadstate_is_loaded( + path_fixture: str, should_load: bool, request: pytest.FixtureRequest +): path = request.getfixturevalue(path_fixture) assert os.path.exists(path) == should_load c = ConfigStateHolder(path) @@ -79,7 +89,9 @@ def test_loadstate_is_loaded(path_fixture: str, should_load: bool, request: pyte assert c.is_loaded() == should_load -@pytest.mark.parametrize('conf_fixture', ['configstate_emptyfile', 'configstate_nonexistant']) +@pytest.mark.parametrize( + "conf_fixture", ["configstate_emptyfile", "configstate_nonexistant"] +) def test_config_fills_defaults(conf_fixture: str, request): c = request.getfixturevalue(conf_fixture) assert c.file == CONFIG_DEFAULTS @@ -89,7 +101,11 @@ def dict_filter_out_None(d: dict): return {k: v for k, v in d.items() if v is not None} -def compare_to_defaults(config: dict, defaults: dict = CONFIG_DEFAULTS, filter_None_from_defaults: Optional[bool] = None): +def compare_to_defaults( + config: dict, + defaults: dict = CONFIG_DEFAULTS, + filter_None_from_defaults: Optional[bool] = None, +): if filter_None_from_defaults is None: filter_None_from_defaults = not isinstance(config, Config) # assert sections match @@ -101,13 +117,16 @@ def compare_to_defaults(config: dict, defaults: dict = CONFIG_DEFAULTS, filter_N if filter_None_from_defaults: section_defaults = dict_filter_out_None(section_defaults) section_values_config = config[section] - if section != 'profiles': + if section != "profiles": assert section_values_config == section_defaults else: - CURRENT_KEY = 'current' + CURRENT_KEY = "current" assert CURRENT_KEY in section_defaults.keys() assert section_defaults.keys() == section_values_config.keys() - assert section_defaults[CURRENT_KEY] == section_values_config[CURRENT_KEY] + assert ( + section_defaults[CURRENT_KEY] + == section_values_config[CURRENT_KEY] + ) for profile_name, profile in section_defaults.items(): if profile_name == CURRENT_KEY: continue # not a profile @@ -117,7 +136,7 @@ def compare_to_defaults(config: dict, defaults: dict = CONFIG_DEFAULTS, filter_N def load_toml_file(path) -> dict: - with open(path, 'r') as f: + with open(path, "r") as f: text = f.read() assert text return toml.loads(text) @@ -143,39 +162,53 @@ def test_config_save_nonexistant(configstate_nonexistant: ConfigStateHolder): def test_config_save_modified(configstate_emptyfile: ConfigStateHolder): c = configstate_emptyfile - WRAPPER_KEY = 'wrapper' - TYPE_KEY = 'type' + WRAPPER_KEY = "wrapper" + TYPE_KEY = "type" assert WRAPPER_KEY in c.file assert TYPE_KEY in c.file[WRAPPER_KEY] - wrapper_section = CONFIG_DEFAULTS[WRAPPER_KEY] | {TYPE_KEY: 'none'} + wrapper_section = CONFIG_DEFAULTS[WRAPPER_KEY] | {TYPE_KEY: "none"} 
c.file[WRAPPER_KEY] |= wrapper_section c.write() defaults_modified = CONFIG_DEFAULTS | {WRAPPER_KEY: wrapper_section} - compare_to_defaults(load_toml_file(get_path_from_stateholder(c)), defaults_modified) + compare_to_defaults( + load_toml_file(get_path_from_stateholder(c)), defaults_modified + ) -def get_config_scheme(data: dict[str, Any], validate=True, allow_incomplete=False) -> Config: +def get_config_scheme( + data: dict[str, Any], validate=True, allow_incomplete=False +) -> Config: """ helper func to ignore a false type error. for some reason, mypy argues about DictScheme.fromDict() instead of Config.fromDict() here """ - return Config.fromDict(data, validate=validate, allow_incomplete=allow_incomplete) # type: ignore[call-arg] + return Config.fromDict( + data, validate=validate, allow_incomplete=allow_incomplete + ) # type: ignore[call-arg] def test_config_scheme_defaults(): - c = get_config_scheme(CONFIG_DEFAULTS, validate=True, allow_incomplete=False) + c = get_config_scheme( + CONFIG_DEFAULTS, validate=True, allow_incomplete=False + ) assert c compare_to_defaults(c) def test_config_scheme_modified(): - modifications = {'wrapper': {'type': 'none'}, 'build': {'crossdirect': False}} + modifications = { + "wrapper": {"type": "none"}, + "build": {"crossdirect": False}, + } assert set(modifications.keys()).issubset(CONFIG_DEFAULTS.keys()) - d = {section_name: (section | modifications.get(section_name, {})) for section_name, section in CONFIG_DEFAULTS.items()} + d = { + section_name: (section | modifications.get(section_name, {})) + for section_name, section in CONFIG_DEFAULTS.items() + } c = get_config_scheme(d, validate=True, allow_incomplete=False) assert c assert c.build.crossdirect is False - assert c.wrapper.type == 'none' + assert c.wrapper.type == "none" def test_configstate_profile_pickle(): @@ -183,7 +216,10 @@ def test_configstate_profile_pickle(): assert c.file.wrapper assert c.file.profiles # add new profile to check it doesn't error out due to unknown keys - c.file.profiles['graphical'] = {'username': 'kupfer123', 'hostname': 'test123'} + c.file.profiles["graphical"] = { + "username": "kupfer123", + "hostname": "test123", + } p = pickle.dumps(c) unpickled = pickle.loads(p) assert c.file == unpickled.file @@ -198,19 +234,19 @@ def test_profile(): def test_get_profile(): c = ConfigStateHolder() - d = {'username': 'kupfer123', 'hostname': 'test123'} - c.file.profiles['testprofile'] = d - p = c.get_profile('testprofile') + d = {"username": "kupfer123", "hostname": "test123"} + c.file.profiles["testprofile"] = d + p = c.get_profile("testprofile") assert p assert isinstance(p, Profile) def test_get_profile_from_disk(configstate_emptyfile): - profile_name = 'testprofile' - device = 'sdm845-oneplus-enchilada' + profile_name = "testprofile" + device = "sdm845-oneplus-enchilada" c = configstate_emptyfile c.file.profiles.default.device = device - d = {'parent': 'default', 'username': 'kupfer123', 'hostname': 'test123'} + d = {"parent": "default", "username": "kupfer123", "hostname": "test123"} c.file.profiles[profile_name] = d filepath = c.runtime.config_file assert filepath @@ -221,5 +257,5 @@ def test_get_profile_from_disk(configstate_emptyfile): c.enforce_config_loaded() p: Profile = c.get_profile(profile_name) assert isinstance(p, Profile) - assert 'device' in p + assert "device" in p assert p.device == device diff --git a/tests/devices/test_device.py b/tests/devices/test_device.py index 5de3805..4bf43da 100644 --- a/tests/devices/test_device.py +++ b/tests/devices/test_device.py 
@@ -5,11 +5,23 @@ import os from copy import copy from kupferbootstrap.config.state import ConfigStateHolder, config -from kupferbootstrap.devices.device import Device, DEVICE_DEPRECATIONS, get_device, get_devices, parse_device_pkg, check_devicepkg_name -from kupferbootstrap.packages.pkgbuild import init_pkgbuilds, discover_pkgbuilds, Pkgbuild, parse_pkgbuild +from kupferbootstrap.devices.device import ( + Device, + DEVICE_DEPRECATIONS, + get_device, + get_devices, + parse_device_pkg, + check_devicepkg_name, +) +from kupferbootstrap.packages.pkgbuild import ( + init_pkgbuilds, + discover_pkgbuilds, + Pkgbuild, + parse_pkgbuild, +) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def initialise_pkgbuilds_dir() -> ConfigStateHolder: config.try_load_file() init_pkgbuilds(interactive=False) @@ -20,10 +32,10 @@ def initialise_pkgbuilds_dir() -> ConfigStateHolder: def pkgbuilds_dir(initialise_pkgbuilds_dir: ConfigStateHolder) -> str: global config config = initialise_pkgbuilds_dir - return config.get_path('pkgbuilds') + return config.get_path("pkgbuilds") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def pkgbuilds_repo_cached(initialise_pkgbuilds_dir) -> dict[str, Pkgbuild]: return discover_pkgbuilds() @@ -34,29 +46,31 @@ def pkgbuilds_repo(pkgbuilds_dir, pkgbuilds_repo_cached): return pkgbuilds_repo_cached -ONEPLUS_ENCHILADA = 'sdm845-oneplus-enchilada' -ONEPLUS_ENCHILADA_PKG = f'device-{ONEPLUS_ENCHILADA}' +ONEPLUS_ENCHILADA = "sdm845-oneplus-enchilada" +ONEPLUS_ENCHILADA_PKG = f"device-{ONEPLUS_ENCHILADA}" -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def enchilada_pkgbuild(initialise_pkgbuilds_dir: ConfigStateHolder): config = initialise_pkgbuilds_dir config.try_load_file() - return parse_pkgbuild(os.path.join('device', ONEPLUS_ENCHILADA_PKG))[0] + return parse_pkgbuild(os.path.join("device", ONEPLUS_ENCHILADA_PKG))[0] def validate_oneplus_enchilada(d: Device): assert d - assert d.arch == 'aarch64' + assert d.arch == "aarch64" assert d.package and d.package.name == ONEPLUS_ENCHILADA_PKG -def test_fixture_initialise_pkgbuilds_dir(initialise_pkgbuilds_dir: ConfigStateHolder): - assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), 'device')) +def test_fixture_initialise_pkgbuilds_dir( + initialise_pkgbuilds_dir: ConfigStateHolder, +): + assert os.path.exists(os.path.join(config.get_path("pkgbuilds"), "device")) def test_fixture_pkgbuilds_dir(pkgbuilds_dir): - assert os.path.exists(os.path.join(pkgbuilds_dir, 'device')) + assert os.path.exists(os.path.join(pkgbuilds_dir, "device")) def test_get_device(): @@ -66,7 +80,7 @@ def test_get_device(): def test_get_device_deprecated(): - name = 'oneplus-enchilada' + name = "oneplus-enchilada" assert name in DEVICE_DEPRECATIONS d = get_device(name) # currently redirects to correct package, need to change this test when changed to an exception @@ -79,7 +93,7 @@ def test_parse_device_pkg_enchilada(enchilada_pkgbuild): def test_parse_device_pkg_malformed_arch(enchilada_pkgbuild): enchilada_pkgbuild = copy(enchilada_pkgbuild) - enchilada_pkgbuild.arches.append('x86_64') + enchilada_pkgbuild.arches.append("x86_64") with pytest.raises(Exception): parse_device_pkg(enchilada_pkgbuild) @@ -96,5 +110,5 @@ def test_get_devices(pkgbuilds_repo: dict[str, Pkgbuild]): assert ONEPLUS_ENCHILADA in d for p in d.values(): check_devicepkg_name(p.package.name) - assert 'sdm845-oneplus-common' not in d + assert "sdm845-oneplus-common" not in d validate_oneplus_enchilada(d[ONEPLUS_ENCHILADA]) diff --git 
a/tests/devices/test_deviceinfo.py b/tests/devices/test_deviceinfo.py index 0c0298d..2cf42e8 100644 --- a/tests/devices/test_deviceinfo.py +++ b/tests/devices/test_deviceinfo.py @@ -42,7 +42,7 @@ deviceinfo_flash_sparse="true" def test_parse_deviceinfo(): config.try_load_file() - d = parse_deviceinfo(deviceinfo_text.split('\n'), 'device-bq-paella') + d = parse_deviceinfo(deviceinfo_text.split("\n"), "device-bq-paella") assert isinstance(d, DeviceInfo) assert d assert d.arch @@ -57,20 +57,28 @@ def test_parse_deviceinfo(): def test_parse_variant_deviceinfo(): config.try_load_file() # {'variant1': 'AAAAA', 'variant2': 'BBBBB', 'variant3': 'CCCCC'} - variants = {f"variant{i+1}": chr(ord('A') + i) * 5 for i in range(0, 3)} + variants = {f"variant{i + 1}": chr(ord("A") + i) * 5 for i in range(0, 3)} field = "dev_touchscreen_calibration" - text = deviceinfo_text + '\n'.join([""] + [f"deviceinfo_{field}_{variant}={value}" for variant, value in variants.items()]) + text = deviceinfo_text + "\n".join( + [""] + + [ + f"deviceinfo_{field}_{variant}={value}" + for variant, value in variants.items() + ] + ) for variant, result in variants.items(): - d = parse_deviceinfo(text.split('\n'), 'device-bq-paella', kernel=variant) + d = parse_deviceinfo( + text.split("\n"), "device-bq-paella", kernel=variant + ) # note: the python code from pmb only strips one variant, the shell code in packaging strips all variants - assert f'{field}_{variant}' not in d + assert f"{field}_{variant}" not in d assert field in d assert d[field] == result def test_get_deviceinfo_from_repo(): config.try_load_file() - dev = get_device('sdm845-oneplus-enchilada') + dev = get_device("sdm845-oneplus-enchilada") assert dev info = dev.parse_deviceinfo() assert info @@ -78,10 +86,12 @@ def test_get_deviceinfo_from_repo(): def test_get_variant_deviceinfo_from_repo(): config.try_load_file() - dev = get_device('sdm845-xiaomi-beryllium-ebbg') + dev = get_device("sdm845-xiaomi-beryllium-ebbg") assert dev info = dev.parse_deviceinfo() assert info - assert 'dtb' in info # variant-specific variable, check it has been stripped down from 'dtb_ebbg' to 'dtb' - assert 'dtb_tianma' not in info + assert ( + "dtb" in info + ) # variant-specific variable, check it has been stripped down from 'dtb_ebbg' to 'dtb' + assert "dtb_tianma" not in info assert info.dtb diff --git a/tests/exec/test_cmd.py b/tests/exec/test_cmd.py index b4169b3..e4f3e4b 100644 --- a/tests/exec/test_cmd.py +++ b/tests/exec/test_cmd.py @@ -16,7 +16,7 @@ def run_func(f, expected_user: Optional[str] = None, **kwargs): current_uid = os.getuid() current_username = get_username(current_uid) target_uid = current_uid - result = f(['id', '-u'], capture_output=True, **kwargs) + result = f(["id", "-u"], capture_output=True, **kwargs) assert isinstance(result, subprocess.CompletedProcess) result.check_returncode() if expected_user and current_username != expected_user: @@ -25,12 +25,14 @@ def run_func(f, expected_user: Optional[str] = None, **kwargs): assert int(result_uid) == target_uid -def run_generate_and_exec(script, generate_args={}, switch_user=None, **kwargs): +def run_generate_and_exec( + script, generate_args={}, switch_user=None, **kwargs +): "runs generate_cmd_su() and executes the resulting argv" if not switch_user: switch_user = get_username(os.getuid()) cmd = generate_cmd_su(script, switch_user=switch_user, **generate_args) - logging.debug(f'run_generate_and_exec: running {cmd}') + logging.debug(f"run_generate_and_exec: running {cmd}") return subprocess.run( cmd, **kwargs, @@ 
-38,21 +40,36 @@ def run_generate_and_exec(script, generate_args={}, switch_user=None, **kwargs): def test_generate_su_force_su(): - run_func(run_generate_and_exec, generate_args={'force_su': True}) + run_func(run_generate_and_exec, generate_args={"force_su": True}) def test_generate_su_force_elevate(): - run_func(run_generate_and_exec, generate_args={'force_elevate': True}, expected_user='root', switch_user='root') + run_func( + run_generate_and_exec, + generate_args={"force_elevate": True}, + expected_user="root", + switch_user="root", + ) def test_generate_su_nobody_force_su(): - user = 'nobody' - run_func(run_generate_and_exec, expected_user=user, switch_user=user, generate_args={'force_su': True}) + user = "nobody" + run_func( + run_generate_and_exec, + expected_user=user, + switch_user=user, + generate_args={"force_su": True}, + ) def test_generate_su_nobody_force_su_and_elevate(): - user = 'nobody' - run_func(run_generate_and_exec, expected_user=user, switch_user=user, generate_args={'force_su': True, 'force_elevate': True}) + user = "nobody" + run_func( + run_generate_and_exec, + expected_user=user, + switch_user=user, + generate_args={"force_su": True, "force_elevate": True}, + ) def test_run_cmd(): @@ -60,13 +77,13 @@ def test_run_cmd(): def test_run_cmd_su_nobody(): - user = 'nobody' + user = "nobody" run_func(run_cmd, expected_user=user, switch_user=user) def test_run_cmd_as_root(): - run_func(run_cmd, expected_user='root', switch_user='root') + run_func(run_cmd, expected_user="root", switch_user="root") def test_run_root_cmd(): - run_func(run_root_cmd, expected_user='root') + run_func(run_root_cmd, expected_user="root") diff --git a/tests/exec/test_file.py b/tests/exec/test_file.py index 8b44507..32edaad 100644 --- a/tests/exec/test_file.py +++ b/tests/exec/test_file.py @@ -14,7 +14,7 @@ TEMPDIR_MODE = 0o755 @dataclass -class TempdirFillInfo(): +class TempdirFillInfo: path: str files: dict[str, str] @@ -26,13 +26,13 @@ def _get_tempdir(): def remove_dir(d): - run_root_cmd(['rm', '-rf', d]).check_returncode() + run_root_cmd(["rm", "-rf", d]).check_returncode() -def create_file(filepath, owner='root', group='root'): +def create_file(filepath, owner="root", group="root"): assert not os.path.exists(filepath) - run_root_cmd(['touch', filepath]).check_returncode() - run_root_cmd(['chown', f'{owner}:{group}', filepath]).check_returncode() + run_root_cmd(["touch", filepath]).check_returncode() + run_root_cmd(["chown", f"{owner}:{group}", filepath]).check_returncode() @pytest.fixture @@ -53,13 +53,13 @@ def test_get_tempdir(tempdir): def tempdir_filled() -> Generator[TempdirFillInfo, None, None]: d = _get_tempdir() contents = { - 'rootfile': { - 'owner': 'root', - 'group': 'root', + "rootfile": { + "owner": "root", + "group": "root", }, - 'userfile': { - 'owner': 'nobody', - 'group': 'nobody', + "userfile": { + "owner": "nobody", + "group": "nobody", }, } res = TempdirFillInfo(path=d, files={}) @@ -87,11 +87,13 @@ def verify_mode(filepath, mode: int = TEMPDIR_MODE): def verify_content(filepath, content): assert os.path.exists(filepath) - with open(filepath, 'r') as f: + with open(filepath, "r") as f: assert f.read().strip() == content.strip() -@pytest.mark.parametrize("user,group", [('root', 'root'), ('nobody', 'nobody')]) +@pytest.mark.parametrize( + "user,group", [("root", "root"), ("nobody", "nobody")] +) def test_chown(tempdir: str, user: str, group: str): assert os.path.exists(tempdir) target_uid = get_uid(user) @@ -110,16 +112,16 @@ def test_chmod(tempdir_filled, mode: int): def 
test_tempdir_filled_fixture(tempdir_filled: TempdirFillInfo): files = tempdir_filled.files assert files - assert 'rootfile' in files - assert 'userfile' in files - verify_ownership(files['rootfile'], 'root', 'root') - verify_ownership(files['userfile'], 'nobody', 'nobody') + assert "rootfile" in files + assert "userfile" in files + verify_ownership(files["rootfile"], "root", "root") + verify_ownership(files["userfile"], "nobody", "nobody") def test_write_new_file_naive(tempdir: str): assert os.path.exists(tempdir) - new = os.path.join(tempdir, 'newfiletest') - content = 'test12345' + new = os.path.join(tempdir, "newfiletest") + content = "test12345" assert not os.path.exists(new) write_file(new, content) verify_content(new, content) @@ -128,20 +130,20 @@ def test_write_new_file_naive(tempdir: str): def test_write_new_file_root(tempdir: str): assert os.path.exists(tempdir) - new = os.path.join(tempdir, 'newfiletest') - content = 'test12345' + new = os.path.join(tempdir, "newfiletest") + content = "test12345" assert not os.path.exists(new) - write_file(new, content, user='root', group='root') + write_file(new, content, user="root", group="root") verify_content(new, content) verify_ownership(new, user=0, group=0) def test_write_new_file_user(tempdir: str): - user = 'nobody' - group = 'nobody' + user = "nobody" + group = "nobody" assert os.path.exists(tempdir) - new = os.path.join(tempdir, 'newfiletest') - content = 'test12345' + new = os.path.join(tempdir, "newfiletest") + content = "test12345" assert not os.path.exists(new) write_file(new, content, user=user, group=group) assert os.path.exists(new) @@ -151,31 +153,35 @@ def test_write_new_file_user(tempdir: str): def test_write_new_file_user_in_root_dir(tempdir: str): assert os.path.exists(tempdir) - chown(tempdir, user='root', group='root') - verify_ownership(tempdir, 'root', 'root') + chown(tempdir, user="root", group="root") + verify_ownership(tempdir, "root", "root") test_write_new_file_user(tempdir) def test_write_rootfile_naive(tempdir_filled: TempdirFillInfo): files = tempdir_filled.files - assert 'rootfile' in files - p = files['rootfile'] + assert "rootfile" in files + p = files["rootfile"] assert os.path.exists(p) - verify_ownership(p, 'root', 'root') - content = 'test123' + verify_ownership(p, "root", "root") + content = "test123" write_file(p, content) - verify_content(p, 'test123') - verify_ownership(p, 'root', 'root') + verify_content(p, "test123") + verify_ownership(p, "root", "root") -@pytest.mark.parametrize("user,group", [('root', 'root'), ('nobody', 'nobody')]) -def test_write_rootfile(tempdir_filled: TempdirFillInfo, user: str, group: str): +@pytest.mark.parametrize( + "user,group", [("root", "root"), ("nobody", "nobody")] +) +def test_write_rootfile( + tempdir_filled: TempdirFillInfo, user: str, group: str +): files = tempdir_filled.files - assert 'rootfile' in files - p = files['rootfile'] + assert "rootfile" in files + p = files["rootfile"] assert os.path.exists(p) - verify_ownership(p, 'root', 'root') - content = 'test123' + verify_ownership(p, "root", "root") + content = "test123" write_file(p, content) - verify_content(p, 'test123') - verify_ownership(p, 'root', 'root') + verify_content(p, "test123") + verify_ownership(p, "root", "root") diff --git a/tests/flavours/test_flavour.py b/tests/flavours/test_flavour.py index 3578588..157af2c 100644 --- a/tests/flavours/test_flavour.py +++ b/tests/flavours/test_flavour.py @@ -2,7 +2,7 @@ import pytest from kupferbootstrap.flavours.flavour import Flavour, get_flavour, 
get_flavours -FLAVOUR_NAME = 'phosh' +FLAVOUR_NAME = "phosh" @pytest.fixture() diff --git a/tests/test_integration.py b/tests/test_integration.py index af72b09..047ae76 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -10,22 +10,30 @@ from kupferbootstrap.constants import SRCINFO_METADATA_FILE from kupferbootstrap.exec.cmd import run_cmd from kupferbootstrap.exec.file import get_temp_dir from kupferbootstrap.logger import setup_logging -from kupferbootstrap.packages.cli import SRCINFO_CACHE_FILES, cmd_build, cmd_clean, cmd_init, cmd_update +from kupferbootstrap.packages.cli import ( + SRCINFO_CACHE_FILES, + cmd_build, + cmd_clean, + cmd_init, + cmd_update, +) from kupferbootstrap.utils import git_get_branch tempdir = None config.try_load_file() setup_logging(True) -PKG_TEST_PATH = 'device/device-sdm845-oneplus-enchilada' -PKG_TEST_NAME = 'device-sdm845-xiaomi-beryllium-ebbg' +PKG_TEST_PATH = "device/device-sdm845-oneplus-enchilada" +PKG_TEST_NAME = "device-sdm845-xiaomi-beryllium-ebbg" INTEGRATION_TESTS_ENABLE_ENV_VAR = "INTEGRATION_TESTS_ENABLE" integration_enabled = False try: - integration_enabled = bool(int(os.environ.get(INTEGRATION_TESTS_ENABLE_ENV_VAR, 0)) > 0) + integration_enabled = bool( + int(os.environ.get(INTEGRATION_TESTS_ENABLE_ENV_VAR, 0)) > 0 + ) except: pass @@ -48,50 +56,69 @@ def ctx() -> click.Context: global tempdir if not tempdir: tempdir = get_temp_dir() - if not os.environ.get('INTEGRATION_TESTS_USE_GLOBAL_CONFIG', 'false').lower() == 'true': - config.file.paths.update(CONFIG_DEFAULTS.paths | {'cache_dir': tempdir}) - config_path = os.path.join(tempdir, 'kupferbootstrap.toml') + if ( + not os.environ.get( + "INTEGRATION_TESTS_USE_GLOBAL_CONFIG", "false" + ).lower() + == "true" + ): + config.file.paths.update( + CONFIG_DEFAULTS.paths | {"cache_dir": tempdir} + ) + config_path = os.path.join(tempdir, "kupferbootstrap.toml") config.runtime.config_file = config_path if not os.path.exists(config_path): config.write() config.try_load_file(config_path) - print(f'cache_dir: {config.file.paths.cache_dir}') - return click.Context(click.Command('integration_tests')) + print(f"cache_dir: {config.file.paths.cache_dir}") + return click.Context(click.Command("integration_tests")) def test_main_import(): from kupferbootstrap.main import cli + assert cli def test_config_load(ctx: click.Context): path = config.runtime.config_file assert path - assert path.startswith('/tmp/') + assert path.startswith("/tmp/") assert os.path.exists(path) config.enforce_config_loaded() def test_packages_update(ctx: click.Context): - pkgbuilds_path = config.get_path('pkgbuilds') + pkgbuilds_path = config.get_path("pkgbuilds") assert config.runtime.script_source_dir - kbs_branch = git_get_branch(os.path.join(config.runtime.script_source_dir, "../..")) + kbs_branch = git_get_branch( + os.path.join(config.runtime.script_source_dir, "../..") + ) # Gitlab CI integration: the CI checks out a detached commit, branch comes back empty. 
- if not kbs_branch and os.environ.get('CI', 'false') == 'true': - kbs_branch = os.environ.get('CI_COMMIT_BRANCH', '') - branches: dict[str, bool] = {'main': False, 'dev': False} + if not kbs_branch and os.environ.get("CI", "false") == "true": + kbs_branch = os.environ.get("CI_COMMIT_BRANCH", "") + branches: dict[str, bool] = {"main": False, "dev": False} if kbs_branch: branches[kbs_branch] = True for branch, may_fail in branches.items(): config.file.pkgbuilds.git_branch = branch try: - ctx.invoke(cmd_init, update=True, non_interactive=True, switch_branch=True, discard_changes=True, init_caches=False) + ctx.invoke( + cmd_init, + update=True, + non_interactive=True, + switch_branch=True, + discard_changes=True, + init_caches=False, + ) except Exception as ex: - print(f'may_fail: {may_fail}; Exception: {ex}') + print(f"may_fail: {may_fail}; Exception: {ex}") if not may_fail: raise ex # check branch really doesn't exist - res = run_cmd(f"git ls-remote {CONFIG_DEFAULTS.pkgbuilds.git_repo} 'refs/heads/*' | grep 'refs/heads/{branch}'") + res = run_cmd( + f"git ls-remote {CONFIG_DEFAULTS.pkgbuilds.git_repo} 'refs/heads/*' | grep 'refs/heads/{branch}'" + ) assert isinstance(res, CompletedProcess) assert res.returncode != 0 continue @@ -99,19 +126,33 @@ def test_packages_update(ctx: click.Context): def test_packages_clean(ctx: click.Context): - if not glob(os.path.join(config.get_path('pkgbuilds'), '*', '*', SRCINFO_METADATA_FILE)): + if not glob( + os.path.join( + config.get_path("pkgbuilds"), "*", "*", SRCINFO_METADATA_FILE + ) + ): ctx.invoke(cmd_update, non_interactive=True) - ctx.invoke(cmd_clean, what=['git'], force=True) + ctx.invoke(cmd_clean, what=["git"], force=True) def test_packages_cache_init(ctx: click.Context): - ctx.invoke(cmd_update, non_interactive=True, switch_branch=False, discard_changes=False, init_caches=True) + ctx.invoke( + cmd_update, + non_interactive=True, + switch_branch=False, + discard_changes=False, + init_caches=True, + ) for f in SRCINFO_CACHE_FILES: - assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), PKG_TEST_PATH, f)) + assert os.path.exists( + os.path.join(config.get_path("pkgbuilds"), PKG_TEST_PATH, f) + ) -def build_pkgs(_ctx: click.Context, query: list[str], arch: str = 'aarch64', **kwargs): +def build_pkgs( + _ctx: click.Context, query: list[str], arch: str = "aarch64", **kwargs +): _ctx.invoke(cmd_build, paths=query, arch=arch, **kwargs) diff --git a/tests/version/test_kbs_version.py b/tests/version/test_kbs_version.py index d274984..ec472f3 100644 --- a/tests/version/test_kbs_version.py +++ b/tests/version/test_kbs_version.py @@ -1,7 +1,10 @@ from pytest import mark from typing import Optional -from kupferbootstrap.version.kbs import get_kbs_version, compare_kbs_version_generic +from kupferbootstrap.version.kbs import ( + get_kbs_version, + compare_kbs_version_generic, +) from kupferbootstrap.version.compare import VerComp @@ -30,5 +33,12 @@ def test_get_kbs_version(): ("v0.2.2", "v1.0.0", VerComp.RIGHT_NEWER), ], ) -def test_kbs_version_compare(minimum_ver: str, kbs_ver: str, expected: Optional[VerComp]): - assert compare_kbs_version_generic(kbs_version=kbs_ver, minimum_ver=minimum_ver) == expected +def test_kbs_version_compare( + minimum_ver: str, kbs_ver: str, expected: Optional[VerComp] +): + assert ( + compare_kbs_version_generic( + kbs_version=kbs_ver, minimum_ver=minimum_ver + ) + == expected + )