Compare commits
121 commits: v0.1.0...prawn/armv
| Author | SHA1 | Date |
|---|---|---|
|  | 645b3b1d2b |  |
|  | efd1de9b91 |  |
|  | 4c77a16bba |  |
|  | 57d5ed474f |  |
|  | 114755888e |  |
|  | b154f835e6 |  |
|  | bc31f9822a |  |
|  | 08fc10bf11 |  |
|  | 6e8fd9f622 |  |
|  | 2b539f5a5b |  |
|  | c4e96af44f |  |
|  | 2db8a0a0cb |  |
|  | f940fd2301 |  |
|  | 3952892029 |  |
|  | 5a794ba3dd |  |
|  | 97d3f05968 |  |
|  | 70c4799385 |  |
|  | c53acbf2f4 |  |
|  | 39be2b2fb6 |  |
|  | f5e3fa46ad |  |
|  | 657a5fe227 |  |
|  | 5b218e64c8 |  |
|  | 27e7fe9a10 |  |
|  | 8a7f78261f |  |
|  | 2d13d82943 |  |
|  | 688f9e2375 |  |
|  | 1c6689f710 |  |
|  | e001d107c2 |  |
|  | 5baaaaa180 |  |
|  | 7d9f1b9ed8 |  |
|  | aaef4b7699 |  |
|  | 91b44299ae |  |
|  | 30d9be0950 |  |
|  | 7eefafc386 |  |
|  | 16fd2f1590 |  |
|  | 4298d15178 |  |
|  | 5e9b0448dc |  |
|  | 924f125893 |  |
|  | 7ca0e80682 |  |
|  | 7f86c80cec |  |
|  | 36b321aa2d |  |
|  | e17a69ed81 |  |
|  | 8b9fe661cf |  |
|  | 4e4e12b6b9 |  |
|  | 5eda60c14d |  |
|  | 1bf397f29f |  |
|  | 216050fbb4 |  |
|  | 7f9f326861 |  |
|  | 6cfd8ae1c2 |  |
|  | 0924ea298a |  |
|  | 1f15d6705c |  |
|  | 0858a64144 |  |
|  | 916be09c61 |  |
|  | 4ed0b8626b |  |
|  | 859b08df6a |  |
|  | dd7e1716b8 |  |
|  | dbf65b44df |  |
|  | 25ea4afe9b |  |
|  | 707c61f026 |  |
|  | 818b354000 |  |
|  | 2535d6bbd8 |  |
|  | cc29b60f9f |  |
|  | 9d24065258 |  |
|  | ceedf4bced |  |
|  | 774b526925 |  |
|  | 107ca5d86e |  |
|  | 4eacee8cad |  |
|  | 98b835c75a |  |
|  | f3a1a510d9 |  |
|  | 879fd113f0 |  |
|  | 72ca2258d1 |  |
|  | c562271006 |  |
|  | 40600855ec |  |
|  | b32099c4f1 |  |
|  | fdf03e2b97 |  |
|  | 6593471a8e |  |
|  | 0465d1035a |  |
|  | 7fcd68ced9 |  |
|  | a6129a82bd |  |
|  | de71a71c13 |  |
|  | 0d4d83f0ed |  |
|  | 66ac56d715 |  |
|  | e3ad2edc69 |  |
|  | d70805f3a6 |  |
|  | cac5ac2ad0 |  |
|  | 2d71b1f3cc |  |
|  | 572638ece9 |  |
|  | bffd60f71a |  |
|  | 8e8713a9d3 |  |
|  | 7a074d1c11 |  |
|  | bb3a7d4881 |  |
|  | 068009185c |  |
|  | bd5415de47 |  |
|  | ec1e5fa300 |  |
|  | 1a58b136e3 |  |
|  | bcf7450235 |  |
|  | fe6fd7dfb5 |  |
|  | 97c1bd1f74 |  |
|  | 5003225409 |  |
|  | 85b4463da4 |  |
|  | 5f3e43a922 |  |
|  | d8e754cdd9 |  |
|  | 80f72b2711 |  |
|  | 6a7a0d6d17 |  |
|  | 06b8536915 |  |
|  | 597390c1e6 |  |
|  | ea7df92b7f |  |
|  | 657ada4c73 |  |
|  | 07c8e178fb |  |
|  | d249504151 |  |
|  | 0d05d3ca26 |  |
|  | 6bff3c3c83 |  |
|  | 1019e8d30c |  |
|  | cec145232e |  |
|  | 13bb652301 |  |
|  | 7bb0d9a2e6 |  |
|  | 882e95b823 |  |
|  | 1ee38a3fb4 |  |
|  | 0da9feeda0 |  |
|  | 35a79363a4 |  |
|  | e28239454a |  |
.gitlab-ci.yml:

```diff
@@ -1,6 +1,7 @@
 stages:
   - check
   - build
+  - deploy
 
 format:
   stage: check
@@ -21,6 +22,24 @@ typecheck:
     reports:
       junit: mypy-report.xml
 
+pytest:
+  stage: check
+  image: archlinux
+  before_script:
+    - pacman -Sy --noconfirm --needed archlinux-keyring && pacman -Su --noconfirm python python-pip sudo git base-devel
+    - pip install pytest pytest-cov -r requirements.txt
+    - 'echo "kupfer ALL = (ALL) NOPASSWD: ALL" > /etc/sudoers.d/kupfer_all'
+    - useradd -m kupfer
+    - chmod 777 .
+  script:
+    - script -e -c 'su kupfer -s /bin/bash -c "./pytest.sh --cov=. --cov-branch --cov-report=term --cov-report=xml:coverage.xml"'
+  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
+  artifacts:
+    reports:
+      junit: pytest-report.xml
+      coverage_report:
+        coverage_format: cobertura
+        path: coverage.xml
 build_docker:
   stage: build
   image: docker:latest
@@ -34,3 +53,26 @@ build_docker:
   only:
     - main
     - dev
+
+.docs:
+  image: "${CI_REGISTRY_IMAGE}:dev"
+  before_script:
+    - pacman -Sy --noconfirm python-sphinx-{click,furo}
+  script:
+    - (cd docs && make)
+    - mv docs/html public
+  artifacts:
+    paths:
+      - public
+
+build_docs:
+  stage: build
+  extends: .docs
+  except:
+    - main
+
+pages:
+  stage: deploy
+  extends: .docs
+  only:
+    - main
```
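The `coverage:` regex in the new pytest job extracts the total percentage from pytest-cov's terminal report. A small self-contained check of that regex, run against a hypothetical report line (the report text is illustrative; the pattern is the one from the job above):

```python
import re

# Hypothetical pytest-cov terminal summary line; real column widths may differ.
line = "TOTAL                             1234    56    89%"

# The pattern from the CI job's `coverage:` key above.
pattern = re.compile(r'(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$')

match = pattern.search(line)
assert match is not None and match.group(1) == '89%'
```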
README.md:

```diff
@@ -3,7 +3,7 @@
 Kupfer Linux bootstrapping tool - drives pacstrap, makepkg, mkfs and fastboot, just to name a few.
 
 ## Installation
-Install Docker, Python 3 with libraries `click`, `appdirs`, `joblib`, `toml`, `typing_extentions`, and `coloredlogs` and put `bin/` into your `PATH`.
+Install Docker, Python 3 with the libraries from `requirements.txt` and put `bin/` into your `PATH`.
 Then use `kupferbootstrap`.
 
 ## Usage
```
binfmt.py (24 lines changed):

```diff
@@ -2,8 +2,9 @@
 
 import os
 import logging
 import subprocess
 
+from constants import Arch, QEMU_ARCHES
+from exec.cmd import run_root_cmd
 from utils import mount
 
-
@@ -38,11 +39,15 @@ def binfmt_info():
     return full
 
 
-def is_registered(arch: str) -> bool:
-    return os.path.exists("/proc/sys/fs/binfmt_misc/qemu-" + arch)
+def is_registered(arch: Arch) -> bool:
+    qemu_arch = QEMU_ARCHES[arch]
+    return os.path.exists("/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch)
 
 
-def register(arch):
+def register(arch: Arch):
+    if arch not in QEMU_ARCHES:
+        raise Exception(f'binfmt.register(): unknown arch {arch} (not in QEMU_ARCHES)')
+    qemu_arch = QEMU_ARCHES[arch]
     if is_registered(arch):
         return
 
@@ -51,7 +56,7 @@ def register(arch):
     # Build registration string
     # https://en.wikipedia.org/wiki/Binfmt_misc
     # :name:type:offset:magic:mask:interpreter:flags
-    info = lines[arch]
+    info = lines[qemu_arch]
     code = info['line']
     binfmt = '/proc/sys/fs/binfmt_misc'
     register = binfmt + '/register'
@@ -63,15 +68,18 @@ def register(arch):
 
     # Register in binfmt_misc
     logging.info(f"Registering qemu binfmt ({arch})")
-    subprocess.run(["sh", "-c", 'echo "' + code + '" > ' + register + ' 2>/dev/null'])
+    run_root_cmd(["sh", "-c", f'echo "{code}" > {register} 2>/dev/null'])
    if not is_registered(arch):
         logging.debug(f'binfmt line: {code}')
         raise Exception(f'Failed to register qemu-user for {arch} with binfmt_misc, {binfmt}/{info["name"]} not found')
 
 
 def unregister(arch):
-    binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + arch
+    if arch not in QEMU_ARCHES:
+        raise Exception(f'binfmt.unregister(): unknown arch {arch} (not in QEMU_ARCHES)')
+    qemu_arch = QEMU_ARCHES[arch]
+    binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch
     if not os.path.exists(binfmt_file):
         return
     logging.info(f"Unregistering qemu binfmt ({arch})")
-    subprocess.run(["sh", "-c", "echo -1 > " + binfmt_file])
+    run_root_cmd(["sh", "-c", f"echo -1 > {binfmt_file}"])
```
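For context on the `:name:type:offset:magic:mask:interpreter:flags` format that `register()` assembles (see the Binfmt_misc link above), here is a minimal sketch; the helper name and the magic/mask values are illustrative placeholders, not the real qemu entries the tool registers:

```python
# Hypothetical sketch of a binfmt_misc registration line; NOT the real
# qemu-aarch64 magic/mask, which match a much longer ELF header prefix.
def build_binfmt_line(name: str, magic: str, mask: str, interpreter: str, flags: str = '') -> str:
    # type "M" registers a magic-byte match at the given offset (0 here)
    return f':{name}:M:0:{magic}:{mask}:{interpreter}:{flags}'


line = build_binfmt_line(
    name='qemu-aarch64',
    magic=r'\x7fELF',          # placeholder magic bytes
    mask=r'\xff\xff\xff\xff',  # placeholder mask selecting which bytes must match
    interpreter='/usr/bin/qemu-aarch64-static',
)
# register() then effectively runs, as root:
#   echo "$line" > /proc/sys/fs/binfmt_misc/register
print(line)
```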
boot.py (3 lines changed):

```diff
@@ -4,6 +4,7 @@ import click
 
 from config import config
 from constants import BOOT_STRATEGIES, FLASH_PARTS, FASTBOOT, JUMPDRIVE, JUMPDRIVE_VERSION
+from exec.file import makedir
 from fastboot import fastboot_boot, fastboot_erase_dtbo
 from image import get_device_and_flavour, losetup_rootfs_image, get_image_path, dump_aboot, dump_lk2nd
 from wrapper import enforce_wrap
@@ -29,7 +30,7 @@ def cmd_boot(type):
     if type == JUMPDRIVE:
         file = f'boot-{device}.img'
         path = os.path.join(config.get_path('jumpdrive'), file)
-        os.makedirs(os.path.dirname(path), exist_ok=True)
+        makedir(os.path.dirname(path))
         if not os.path.exists(path):
             urllib.request.urlretrieve(f'https://github.com/dreemurrs-embedded/Jumpdrive/releases/download/{JUMPDRIVE_VERSION}/{file}', path)
     else:
```
cache.py (12 lines changed):

```diff
@@ -1,10 +1,11 @@
-import shutil
 import click
 import os
-from config import config
-from wrapper import enforce_wrap
 import logging
 
+from config import config
+from exec.file import remove_file
+from wrapper import enforce_wrap
+
 PATHS = ['chroots', 'pacman', 'jumpdrive', 'packages', 'images']
 
 
@@ -37,7 +38,4 @@ def cmd_clean(paths: list[str], force=False):
         for file in os.listdir(dir):
             path = os.path.join(dir, file)
             logging.debug(f'Removing "{path_name}/{file}"')
-            if os.path.isdir(path):
-                shutil.rmtree(path)
-            else:
-                os.unlink(path)
+            remove_file(path, recursive=True)
```
chroot/abstract.py:

```diff
@@ -8,8 +8,10 @@ from typing import Protocol, Union, Optional, Mapping
 from uuid import uuid4
 
 from config import config
-from constants import Arch, CHROOT_PATHS
+from constants import Arch, CHROOT_PATHS, GCC_HOSTSPECS
 from distro.distro import get_base_distro, get_kupfer_local, RepoInfo
+from exec.cmd import run_root_cmd, generate_env_cmd, flatten_shell_script, wrap_in_bash
+from exec.file import makedir, root_makedir, root_write_file, write_file
 from generator import generate_makepkg_conf
 from utils import mount, umount, check_findmnt, log_or_exception
 
@@ -138,7 +140,7 @@ class Chroot(AbstractChroot):
         options=['bind'],
         fs_type: str = None,
         fail_if_mounted: bool = True,
-        makedir: bool = True,
+        mkdir: bool = True,
         strict_cache_consistency: bool = False,
     ):
         """returns the absolute path `relative_target` was mounted at"""
@@ -158,8 +160,8 @@ class Chroot(AbstractChroot):
             else:
                 if pseudo_absolute in self.active_mounts:
                     log_or_exc(f'{self.name}: Mount {pseudo_absolute} was in active_mounts but not actually mounted. ({absolute_destination})')
-        if makedir and os.path.isdir(absolute_source):
-            os.makedirs(absolute_destination, exist_ok=True)
+        if mkdir and os.path.isdir(absolute_source):
+            root_makedir(absolute_destination)
         result = mount(absolute_source, absolute_destination, options=options, fs_type=fs_type, register_unmount=False)
         if result.returncode != 0:
             raise Exception(f'{self.name}: failed to mount {absolute_source} to {absolute_destination}')
@@ -215,7 +217,7 @@ class Chroot(AbstractChroot):
         self,
         script: Union[str, list[str]],
         inner_env: dict[str, str] = {},
-        outer_env: dict[str, str] = os.environ.copy() | {'QEMU_LD_PREFIX': '/usr/aarch64-linux-gnu'},
+        outer_env: dict[str, str] = {},
         attach_tty: bool = False,
         capture_output: bool = False,
         cwd: Optional[str] = None,
@@ -225,28 +227,20 @@ class Chroot(AbstractChroot):
         if not self.active and fail_inactive:
             raise Exception(f'Chroot {self.name} is inactive, not running command! Hint: pass `fail_inactive=False`')
         if outer_env is None:
-            outer_env = os.environ.copy()
-        env_cmd = ['/usr/bin/env'] + [f'{shell_quote(key)}={shell_quote(value)}' for key, value in inner_env.items()]
-        kwargs: dict = {
-            'env': outer_env,
-        }
-        if not attach_tty:
-            kwargs |= {'stdout': stdout} if stdout else {'capture_output': capture_output}
+            outer_env = {}
+        native = config.runtime['arch']
+        if self.arch != native and 'QEMU_LD_PREFIX' not in outer_env:
+            outer_env = dict(outer_env)  # copy dict for modification
+            outer_env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][self.arch]}'}
+        env_cmd = generate_env_cmd(inner_env) if inner_env else []
 
-        if not isinstance(script, str) and isinstance(script, list):
-            script = ' '.join(script)
+        script = flatten_shell_script(script, shell_quote_items=False, wrap_in_shell_quote=False)
         if cwd:
             script = f"cd {shell_quote(cwd)} && ( {script} )"
-        cmd = ['chroot', self.path] + env_cmd + [
-            '/bin/bash',
-            '-c',
-            script,
-        ]
-        logging.debug(f'{self.name}: Running cmd: "{cmd}"')
-        if attach_tty:
-            return subprocess.call(cmd, **kwargs)
-        else:
-            return subprocess.run(cmd, **kwargs)
+        cmd = flatten_shell_script(['chroot', self.path] + env_cmd + wrap_in_bash(script, flatten_result=False), shell_quote_items=True)
+
+        return run_root_cmd(cmd, env=outer_env, attach_tty=attach_tty, capture_output=capture_output, stdout=stdout)
 
     def mount_pkgbuilds(self, fail_if_mounted: bool = False) -> str:
         return self.mount(
@@ -258,8 +252,8 @@ class Chroot(AbstractChroot):
     def mount_pacman_cache(self, fail_if_mounted: bool = False) -> str:
         arch_cache = os.path.join(config.get_path('pacman'), self.arch)
         rel_target = os.path.join(CHROOT_PATHS['pacman'].lstrip('/'), self.arch)
-        for dir in [arch_cache, self.get_path(rel_target)]:
-            os.makedirs(dir, exist_ok=True)
+        makedir(arch_cache)
+        root_makedir(self.get_path(rel_target))
         return self.mount(
             arch_cache,
             rel_target,
@@ -283,15 +277,31 @@ class Chroot(AbstractChroot):
         filename = 'makepkg' + (f'_cross_{target_arch}' if cross else '') + '.conf'
         makepkg_conf_path_relative = os.path.join('etc', filename)
         makepkg_conf_path = os.path.join(self.path, makepkg_conf_path_relative)
-        with open(makepkg_conf_path, 'w') as f:
-            f.write(makepkg_cross_conf)
+        root_makedir(self.get_path('/etc'))
+        root_write_file(makepkg_conf_path, makepkg_cross_conf)
         return makepkg_conf_path_relative
 
-    def write_pacman_conf(self, check_space: bool = False):
-        os.makedirs(self.get_path('/etc'), exist_ok=True)
-        conf_text = get_base_distro(self.arch).get_pacman_conf(self.extra_repos, check_space=check_space)
-        with open(self.get_path('etc/pacman.conf'), 'w') as file:
-            file.write(conf_text)
+    def write_pacman_conf(self, check_space: Optional[bool] = None, in_chroot: bool = True, absolute_path: str = None):
+        user = None
+        group = None
+        if check_space is None:
+            check_space = config.file['pacman']['check_space']
+        if not absolute_path:
+            path = self.get_path('/etc')
+            root_makedir(path)
+            absolute_path = os.path.join(path, 'pacman.conf')
+            user = 'root'
+            group = 'root'
+        repos = deepcopy(self.extra_repos)
+        if not in_chroot:
+            for repo in repos.values():
+                repo.url_template = repo.url_template.replace(
+                    f'file://{CHROOT_PATHS["packages"]}',
+                    f'file://{config.get_path("packages")}',
+                    1,
+                )
+        conf_text = get_base_distro(self.arch).get_pacman_conf(repos, check_space=check_space, in_chroot=in_chroot)
+        write_file(absolute_path, conf_text, user=user, group=group)
 
     def create_user(
         self,
```
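The `run_cmd()` change above stops hardcoding `QEMU_LD_PREFIX=/usr/aarch64-linux-gnu` and derives the prefix from `GCC_HOSTSPECS` for any foreign architecture. A minimal sketch of that derivation, using an illustrative one-entry hostspec table rather than the repository's full constant:

```python
# Illustrative subset of GCC_HOSTSPECS; the real constant covers more arch pairs.
GCC_HOSTSPECS = {'x86_64': {'aarch64': 'aarch64-linux-gnu'}}

native, target = 'x86_64', 'aarch64'
outer_env: dict[str, str] = {}
if target != native and 'QEMU_LD_PREFIX' not in outer_env:
    # qemu-user needs the target's dynamic linker/library prefix
    outer_env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][target]}'}
assert outer_env['QEMU_LD_PREFIX'] == '/usr/aarch64-linux-gnu'
```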
chroot/base.py:

```diff
@@ -1,11 +1,13 @@
 import logging
 import os
-import subprocess
 
 from glob import glob
 from shutil import rmtree
 
+from constants import Arch
+from exec.cmd import run_root_cmd
+from exec.file import makedir, root_makedir
 from config import config
 
 from .abstract import Chroot, get_chroot
 from .helpers import base_chroot_name
@@ -20,17 +22,18 @@ class BaseChroot(Chroot):
         logging.info(f'Resetting {self.name}')
         for dir in glob(os.path.join(self.path, '*')):
             rmtree(dir)
+        makedir(config.get_path('chroots'))
+        root_makedir(self.get_path())
 
-        self.write_pacman_conf(check_space=True)
+        self.write_pacman_conf()
         self.mount_pacman_cache()
 
         logging.info(f'Pacstrapping chroot {self.name}: {", ".join(self.base_packages)}')
 
-        result = subprocess.run([
+        result = run_root_cmd([
             'pacstrap',
             '-C',
             pacman_conf_target,
             '-c',
             '-G',
             self.path,
         ] + self.base_packages + [
```
chroot/build.py:

```diff
@@ -7,6 +7,8 @@ from typing import Optional
 from config import config
 from constants import Arch, GCC_HOSTSPECS, CROSSDIRECT_PKGS, CHROOT_PATHS
 from distro.distro import get_kupfer_local
+from exec.cmd import run_root_cmd
+from exec.file import makedir, remove_file, root_makedir, root_write_file, symlink
 
 from .abstract import Chroot, get_chroot
 from .helpers import build_chroot_name
@@ -18,6 +20,8 @@ class BuildChroot(Chroot):
     copy_base: bool = True
 
     def create_rootfs(self, reset: bool, pacman_conf_target: str, active_previously: bool):
+        makedir(config.get_path('chroots'))
+        root_makedir(self.get_path())
         if reset or not os.path.exists(self.get_path('usr/bin')):
             base_chroot = get_base_chroot(self.arch)
             if base_chroot == self:
@@ -29,7 +33,7 @@ class BuildChroot(Chroot):
                 cmd += ['--exclude', mountpoint.rstrip('/')]
             cmd += [f'{base_chroot.path}/', f'{self.path}/']
             logging.debug(f"running rsync: {cmd}")
-            result = subprocess.run(cmd)
+            result = run_root_cmd(cmd)
             if result.returncode != 0:
                 raise Exception(f'Failed to copy {base_chroot.name} to {self.name}')
 
@@ -50,8 +54,7 @@ class BuildChroot(Chroot):
             with open(self.get_path('/usr/bin/makepkg'), 'r') as file:
                 data = file.read()
             data = data.replace('EUID == 0', 'EUID == -1')
-            with open(self.get_path('/usr/bin/makepkg'), 'w') as file:
-                file.write(data)
+            root_write_file(self.get_path('/usr/bin/makepkg'), data)
 
             # configure makepkg
             self.write_makepkg_conf(self.arch, cross_chroot_relative=None, cross=False)
@@ -101,11 +104,11 @@ class BuildChroot(Chroot):
             for target, source in {cc_path: gcc, target_lib_dir: 'lib', target_include_dir: 'usr/include'}.items():
                 if not os.path.exists(target):
                     logging.debug(f'Symlinking {source} at {target}')
-                    os.symlink(source, target)
+                    symlink(source, target)
             ld_so = os.path.basename(glob(f"{os.path.join(native_chroot.path, 'usr', 'lib', 'ld-linux-')}*")[0])
             ld_so_target = os.path.join(target_lib_dir, ld_so)
             if not os.path.islink(ld_so_target):
-                os.symlink(os.path.join('/native', 'usr', 'lib', ld_so), ld_so_target)
+                symlink(os.path.join('/native', 'usr', 'lib', ld_so), ld_so_target)
             else:
                 logging.debug(f'ld-linux.so symlink already exists, skipping for {self.name}')
 
@@ -113,9 +116,9 @@ class BuildChroot(Chroot):
         rustc = os.path.join(native_chroot.path, 'usr/lib/crossdirect', target_arch, 'rustc')
         if os.path.exists(rustc):
             logging.debug('Disabling crossdirect rustc')
-            os.unlink(rustc)
+            remove_file(rustc)
 
-        os.makedirs(native_mount, exist_ok=True)
+        root_makedir(native_mount)
         logging.debug(f'Mounting {native_chroot.name} to {native_mount}')
         self.mount(native_chroot.path, 'native', fail_if_mounted=fail_if_mounted)
         return native_mount
```
chroot/device.py:

```diff
@@ -1,10 +1,13 @@
 import atexit
 import os
 
+from typing import Optional
+
 from config import config
 from constants import Arch, BASE_PACKAGES
 from distro.distro import get_kupfer_local, get_kupfer_https
+from exec.file import get_temp_dir, makedir, root_makedir
 from utils import check_findmnt
-from typing import Optional
 
 from .base import BaseChroot
 from .build import BuildChroot
@@ -18,6 +21,12 @@ class DeviceChroot(BuildChroot):
     def create_rootfs(self, reset, pacman_conf_target, active_previously):
         clss = BuildChroot if self.copy_base else BaseChroot
 
+        makedir(config.get_path('chroots'))
+        root_makedir(self.get_path())
+        if not self.copy_base:
+            pacman_conf_target = os.path.join(get_temp_dir(register_cleanup=True), f'pacman-{self.name}.conf')
+            self.write_pacman_conf(in_chroot=False, absolute_path=pacman_conf_target)
+
         clss.create_rootfs(self, reset, pacman_conf_target, active_previously)
 
     def mount_rootfs(self, source_path: str, fs_type: str = None, options: list[str] = [], allow_overlay: bool = False):
@@ -36,7 +45,7 @@ class DeviceChroot(BuildChroot):
             raise Exception(f'{self.name}: There is already something mounted at {self.path}, not mounting over it.')
         if os.path.exists(os.path.join(self.path, 'usr/bin')):
             raise Exception(f'{self.name}: {self.path}/usr/bin exists, not mounting over existing rootfs.')
-        os.makedirs(self.path, exist_ok=True)
+        makedir(self.path)
         atexit.register(self.deactivate)
         self.mount(source_path, '/', fs_type=fs_type, options=options)
 
```
config.py (deleted; 631 lines):

```python
import appdirs
import click
import os
import toml
import logging
from copy import deepcopy
from typing import Optional, Union, TypedDict, Any, Mapping

from constants import DEFAULT_PACKAGE_BRANCH

CONFIG_DIR = appdirs.user_config_dir('kupfer')
CACHE_DIR = appdirs.user_cache_dir('kupfer')

CONFIG_DEFAULT_PATH = os.path.join(CONFIG_DIR, 'kupferbootstrap.toml')


class Profile(TypedDict, total=False):
    parent: str
    device: str
    flavour: str
    pkgs_include: list[str]
    pkgs_exclude: list[str]
    hostname: str
    username: str
    password: Optional[str]
    size_extra_mb: Union[str, int]


PROFILE_DEFAULTS: Profile = {
    'parent': '',
    'device': '',
    'flavour': '',
    'pkgs_include': [],
    'pkgs_exclude': [],
    'hostname': 'kupfer',
    'username': 'kupfer',
    'password': None,
    'size_extra_mb': "0",
}

PROFILE_EMPTY: Profile = {key: None for key in PROFILE_DEFAULTS.keys()}  # type: ignore

CONFIG_DEFAULTS: dict = {
    'wrapper': {
        'type': 'docker',
    },
    'build': {
        'ccache': True,
        'clean_mode': True,
        'crosscompile': True,
        'crossdirect': True,
        'threads': 0,
    },
    'pkgbuilds': {
        'git_repo': 'https://gitlab.com/kupfer/packages/pkgbuilds.git',
        'git_branch': DEFAULT_PACKAGE_BRANCH,
    },
    'pacman': {
        'parallel_downloads': 4,
        'repo_branch': DEFAULT_PACKAGE_BRANCH,
    },
    'paths': {
        'cache_dir': CACHE_DIR,
        'chroots': os.path.join('%cache_dir%', 'chroots'),
        'pacman': os.path.join('%cache_dir%', 'pacman'),
        'packages': os.path.join('%cache_dir%', 'packages'),
        'pkgbuilds': os.path.join('%cache_dir%', 'pkgbuilds'),
        'jumpdrive': os.path.join('%cache_dir%', 'jumpdrive'),
        'images': os.path.join('%cache_dir%', 'images'),
    },
    'profiles': {
        'current': 'default',
        'default': deepcopy(PROFILE_DEFAULTS),
    },
}
CONFIG_SECTIONS = list(CONFIG_DEFAULTS.keys())

CONFIG_RUNTIME_DEFAULTS = {
    'verbose': False,
    'config_file': None,
    'arch': None,
    'no_wrap': False,
    'script_source_dir': os.path.dirname(os.path.realpath(__file__)),
    'error_shell': False,
}


def resolve_path_template(path_template: str, paths: dict[str, str]) -> str:
    terminator = '%'  # i'll be back
    result = path_template
    for path_name, path in paths.items():
        result = result.replace(terminator + path_name + terminator, path)
    return result


def resolve_profile(
    name: str,
    sparse_profiles: dict[str, Profile],
    resolved: dict[str, Profile] = None,
    _visited=None,
) -> dict[str, Profile]:
    """
    Recursively resolves the specified profile by `name` and its parents to merge the config semantically,
    applying include and exclude overrides along the hierarchy.
    If `resolved` is passed `None`, a fresh dictionary will be created.
    `resolved` will be modified in-place during parsing and also returned.
    A sanitized `sparse_profiles` dict is assumed, no checking for unknown keys or incorrect data types is performed.
    `_visited` should not be passed by users.
    """
    if _visited is None:
        _visited = list[str]()
    if resolved is None:
        resolved = dict[str, Profile]()
    if name in _visited:
        loop = list(_visited)
        raise Exception(f'Dependency loop detected in profiles: {" -> ".join(loop+[loop[0]])}')
    if name in resolved:
        return resolved

    logging.debug(f'Resolving profile {name}')
    _visited.append(name)
    sparse = sparse_profiles[name]
    full = deepcopy(sparse)
    if 'parent' in sparse and (parent_name := sparse['parent']):
        parent = resolve_profile(name=parent_name, sparse_profiles=sparse_profiles, resolved=resolved, _visited=_visited)[parent_name]
        full = parent | sparse
        # add up size_extra_mb
        if 'size_extra_mb' in sparse:
            size = sparse['size_extra_mb']
            if isinstance(size, str) and size.startswith('+'):
                full['size_extra_mb'] = int(parent.get('size_extra_mb', 0)) + int(size.lstrip('+'))
            else:
                full['size_extra_mb'] = int(sparse['size_extra_mb'])
        # join our includes with parent's
        includes = set(parent.get('pkgs_include', []) + sparse.get('pkgs_include', []))
        if 'pkgs_exclude' in sparse:
            includes -= set(sparse['pkgs_exclude'])
        full['pkgs_include'] = list(includes)

        # join our includes with parent's
        excludes = set(parent.get('pkgs_exclude', []) + sparse.get('pkgs_exclude', []))
        # our includes override parent excludes
        if 'pkgs_include' in sparse:
            excludes -= set(sparse['pkgs_include'])
        full['pkgs_exclude'] = list(excludes)

    # now init missing keys
    for key, value in PROFILE_DEFAULTS.items():
        if key not in full.keys():
            full[key] = None  # type: ignore[literal-required]
            if type(value) == list:
                full[key] = []  # type: ignore[literal-required]

    full['size_extra_mb'] = int(full['size_extra_mb'] or 0)

    resolved[name] = full
    return resolved


def sanitize_config(conf: dict[str, dict], warn_missing_defaultprofile=True) -> dict[str, dict]:
    """checks the input config dict for unknown keys and returns only the known parts"""
    return merge_configs(conf_new=conf, conf_base={}, warn_missing_defaultprofile=warn_missing_defaultprofile)


def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defaultprofile=True) -> dict[str, dict]:
    """
    Returns `conf_new` semantically merged into `conf_base`, after validating
    `conf_new` keys against `CONFIG_DEFAULTS` and `PROFILE_DEFAULTS`.
    Pass `conf_base={}` to get a sanitized version of `conf_new`.
    NOTE: `conf_base` is NOT checked for invalid keys. Sanitize beforehand.
    """
    parsed = deepcopy(conf_base)

    for outer_name, outer_conf in deepcopy(conf_new).items():
        # only handle known config sections
        if outer_name not in CONFIG_DEFAULTS.keys():
            logging.warning(f'Skipped unknown config section "{outer_name}"')
            continue
        logging.debug(f'Parsing config section "{outer_name}"')
        # check if outer_conf is a dict
        if not isinstance(outer_conf, dict):
            parsed[outer_name] = outer_conf
        else:
            # init section
            if outer_name not in parsed:
                parsed[outer_name] = {}

            # profiles need special handling:
            # 1. profile names are unknown keys by definition, but we want 'default' to exist
            # 2. A profile's subkeys must be compared against PROFILE_DEFAULTS.keys()
            if outer_name == 'profiles':
                if warn_missing_defaultprofile and 'default' not in outer_conf.keys():
                    logging.warning('Default profile is not defined in config file')

                for profile_name, profile_conf in outer_conf.items():
                    if not isinstance(profile_conf, dict):
                        if profile_name == 'current':
                            parsed[outer_name][profile_name] = profile_conf
                        else:
                            logging.warning('Skipped key "{profile_name}" in profile section: only subsections and "current" allowed')
                        continue

                    # init profile
                    if profile_name not in parsed[outer_name]:
                        parsed[outer_name][profile_name] = {}

                    for key, val in profile_conf.items():
                        if key not in PROFILE_DEFAULTS:
                            logging.warning(f'Skipped unknown config item "{key}" in profile "{profile_name}"')
                            continue
                        parsed[outer_name][profile_name][key] = val

            else:
                # handle generic inner config dict
                for inner_name, inner_conf in outer_conf.items():
                    if inner_name not in CONFIG_DEFAULTS[outer_name].keys():
                        logging.warning(f'Skipped unknown config item "{inner_name}" in "{outer_name}"')
                        continue
                    parsed[outer_name][inner_name] = inner_conf

    return parsed


def dump_toml(conf) -> str:
    return toml.dumps(conf)


def dump_file(file_path: str, config: dict, file_mode: int = 0o600):

    def _opener(path, flags):
        return os.open(path, flags, file_mode)

    conf_dir = os.path.dirname(file_path)
    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)
    old_umask = os.umask(0)
    with open(file_path, 'w', opener=_opener) as f:
        f.write(dump_toml(conf=config))
    os.umask(old_umask)


def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict:
    """
    Parse the toml contents of `config_file`, validating keys against `CONFIG_DEFAULTS`.
    The parsed results are semantically merged into `base` before returning.
    `base` itself is NOT checked for invalid keys.
    """
    _conf_file = config_file if config_file is not None else CONFIG_DEFAULT_PATH
    logging.debug(f'Trying to load config file: {_conf_file}')
    loaded_conf = toml.load(_conf_file)
    return merge_configs(conf_new=loaded_conf, conf_base=base)


class ConfigLoadException(Exception):
    inner = None

    def __init__(self, extra_msg='', inner_exception: Exception = None):
        msg: list[str] = ['Config load failed!']
        if extra_msg:
            msg.append(extra_msg)
        if inner_exception:
            self.inner = inner_exception
            msg.append(str(inner_exception))
        super().__init__(self, ' '.join(msg))


class ConfigStateHolder:

    class ConfigLoadState:
        load_finished = False
        exception = None

    file_state = ConfigLoadState()

    defaults = CONFIG_DEFAULTS
    # config options that are persisted to file
    file: dict = {}
    # runtime config not persisted anywhere
    runtime: dict = CONFIG_RUNTIME_DEFAULTS
    _profile_cache: dict[str, Profile]

    def __init__(self, runtime_conf={}, file_conf_path: Optional[str] = None, file_conf_base: dict = {}):
        """init a stateholder, optionally loading `file_conf_path`"""
        self.runtime.update(runtime_conf)
        self.runtime['arch'] = os.uname().machine
        self.file.update(file_conf_base)
        if file_conf_path:
            self.try_load_file(file_conf_path)

    def try_load_file(self, config_file=None, base=CONFIG_DEFAULTS):
        config_file = config_file or CONFIG_DEFAULT_PATH
        self.runtime['config_file'] = config_file
        self._profile_cache = None
        try:
            self.file = parse_file(config_file=config_file, base=base)
        except Exception as ex:
            self.file_state.exception = ex
        self.file_state.load_finished = True

    def is_loaded(self) -> bool:
        return self.file_state.load_finished and self.file_state.exception is None

    def enforce_config_loaded(self):
        if not self.file_state.load_finished:
            raise ConfigLoadException(Exception("Config file wasn't even parsed yet. This is probably a bug in kupferbootstrap :O"))
        ex = self.file_state.exception
        if ex:
            if type(ex) == FileNotFoundError:
                ex = Exception("File doesn't exist. Try running `kupferbootstrap config init` first?")
            raise ex

    def get_profile(self, name: Optional[str] = None) -> Profile:
        name = name or self.file['profiles']['current']
        self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file['profiles'], resolved=self._profile_cache)
        return self._profile_cache[name]

    def get_path(self, path_name: str) -> str:
        paths = self.file['paths']
        return resolve_path_template(paths[path_name], paths)

    def get_package_dir(self, arch: str):
        return os.path.join(self.get_path('packages'), arch)

    def dump(self) -> str:
        """dump toml representation of `self.file`"""
        return dump_toml(self.file)

    def write(self, path=None):
        """write toml representation of `self.file` to `path`"""
        if path is None:
            path = self.runtime['config_file']
        os.makedirs(os.path.dirname(path), exist_ok=True)
        dump_file(path, self.file)
        logging.info(f'Created config file at {path}')

    def invalidate_profile_cache(self):
        """Clear the profile cache (usually after modification)"""
        self._profile_cache = None

    def update(self, config_fragment: dict[str, dict], warn_missing_defaultprofile: bool = True) -> bool:
        """Update `self.file` with `config_fragment`. Returns `True` if the config was changed"""
        merged = merge_configs(config_fragment, conf_base=self.file, warn_missing_defaultprofile=warn_missing_defaultprofile)
        changed = self.file != merged
        self.file = merged
        if changed and 'profiles' in config_fragment and self.file['profiles'] != config_fragment['profiles']:
            self.invalidate_profile_cache()
        return changed

    def update_profile(self, name: str, profile: Profile, merge: bool = False, create: bool = True, prune: bool = True):
        new = {}
        if name not in self.file['profiles']:
            if not create:
                raise Exception(f'Unknown profile: {name}')
        else:
            if merge:
                new = deepcopy(self.file['profiles'][name])

        logging.debug(f'new: {new}')
        logging.debug(f'profile: {profile}')
        new |= profile

        if prune:
            new = {key: val for key, val in new.items() if val is not None}
        self.file['profiles'][name] = new
        self.invalidate_profile_cache()


def list_to_comma_str(str_list: list[str], default='') -> str:
    if str_list is None:
        return default
    return ','.join(str_list)


def comma_str_to_list(s: str, default=None) -> list[str]:
    if not s:
        return default
    return [a for a in s.split(',') if a]


def prompt_config(
    text: str,
    default: Any,
    field_type: type = str,
    bold: bool = True,
    echo_changes: bool = True,
) -> tuple[Any, bool]:
    """
    prompts for a new value for a config key. returns the result and a boolean that indicates
    whether the result is different, considering empty strings and None equal to each other.
    """

    def true_or_zero(to_check) -> bool:
        """returns true if the value is truthy or int(0)"""
        zero = 0  # compiler complains about 'is with literal' otherwise
        return to_check or to_check is zero  # can't do == due to boolean<->int casting

    if type(None) == field_type:
        field_type = str

    if field_type == dict:
        raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap')
    elif field_type == list:
        default = list_to_comma_str(default)
        value_conv = comma_str_to_list
    else:
        value_conv = None
        default = '' if default is None else default

    if bold:
        text = click.style(text, bold=True)

    result = click.prompt(text, type=field_type, default=default, value_proc=value_conv, show_default=True)
    changed = (result != default) and (true_or_zero(default) or true_or_zero(result))
    if changed and echo_changes:
        print(f'value changed: "{text}" = "{result}"')

    return result, changed


def prompt_profile(name: str, create: bool = True, defaults: Profile = {}) -> tuple[Profile, bool]:
    """Prompts the user for every field in `defaults`. Set values to None for an empty profile."""

    profile: Any = PROFILE_EMPTY | defaults
    # don't use get_profile() here because we need the sparse profile
    if name in config.file['profiles']:
        profile |= config.file['profiles'][name]
    elif create:
        logging.info(f"Profile {name} doesn't exist yet, creating new profile.")
    else:
        raise Exception(f'Unknown profile "{name}"')
    logging.info(f'Configuring profile "{name}"')
    changed = False
    for key, current in profile.items():
        current = profile[key]
        text = f'{name}.{key}'
        result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key]))  # type: ignore
        if _changed:
            profile[key] = result
            changed = True
    return profile, changed


def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any:
    if not isinstance(config, dict):
        raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}")
    split_name = name.split('.')
    name = split_name[0]
    if name not in config:
        raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
    value = config[name]
    if len(split_name) == 1:
        return value
    else:
        rest_name = '.'.join(split_name[1:])
        return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')


def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
    split_name = name.split('.')
    if len(split_name) > 1:
        config = config_dot_name_get('.'.join(split_name[:-1]), config)
    config[split_name[-1]] = value


config = ConfigStateHolder(file_conf_base=CONFIG_DEFAULTS)

config_option = click.option(
    '-C',
    '--config',
    'config_file',
    help='Override path to config file',
)


@click.group(name='config')
def cmd_config():
    """Manage the configuration and -profiles"""


noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)


@cmd_config.command(name='init')
@noninteractive_flag
@noop_flag
@click.option(
    '--sections',
    '-s',
    multiple=True,
    type=click.Choice(CONFIG_SECTIONS),
    default=CONFIG_SECTIONS,
    show_choices=True,
)
def cmd_config_init(sections: list[str] = CONFIG_SECTIONS, non_interactive: bool = False, noop: bool = False):
    """Initialize the config file"""
    if not non_interactive:
        results: dict[str, dict] = {}
        for section in sections:
            if section not in CONFIG_SECTIONS:
                raise Exception(f'Unknown section: {section}')
            if section == 'profiles':
                continue

            results[section] = {}
            for key, current in config.file[section].items():
                text = f'{section}.{key}'
                result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key]))
                if changed:
                    results[section][key] = result

        config.update(results)
        if 'profiles' in sections:
            current_profile = 'default' if 'current' not in config.file['profiles'] else config.file['profiles']['current']
            new_current, _ = prompt_config('profile.current', default=current_profile, field_type=str)
            profile, changed = prompt_profile(new_current, create=True)
            config.update_profile(new_current, profile)
        if not noop:
            if not click.confirm(f'Do you want to save your changes to {config.runtime["config_file"]}?'):
                return

    if not noop:
        config.write()
    else:
        logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')


@cmd_config.command(name='set')
@noninteractive_flag
@noop_flag
@click.argument('key_vals', nargs=-1)
def cmd_config_set(key_vals: list[str], non_interactive: bool = False, noop: bool = False):
    """
    Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers,
    like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively.
    """
    config.enforce_config_loaded()
    config_copy = deepcopy(config.file)
    for pair in key_vals:
        split_pair = pair.split('=')
        if len(split_pair) == 2:
            key: str = split_pair[0]
            value: Any = split_pair[1]
            value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
            if value_type != list:
                value = click.types.convert_type(value_type)(value)
            else:
                value = comma_str_to_list(value, default=[])
        elif len(split_pair) == 1 and not non_interactive:
            key = split_pair[0]
            value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
            current = config_dot_name_get(key, config.file)
            value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False)
        else:
            raise Exception(f'Invalid key=value pair "{pair}"')
        print('%s = %s' % (key, value))
        config_dot_name_set(key, value, config_copy)
        if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy:
            raise Exception('Config "{key}" = "{value}" failed to evaluate')
    if not noop:
        if not non_interactive and not click.confirm(f'Do you want to save your changes to {config.runtime["config_file"]}?'):
            return
        config.update(config_copy)
        config.write()


@cmd_config.command(name='get')
@click.argument('keys', nargs=-1)
def cmd_config_get(keys: list[str]):
    """Get config entries.
    Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`"""
    if len(keys) == 1:
        print(config_dot_name_get(keys[0], config.file))
        return
    for key in keys:
        print('%s = %s' % (key, config_dot_name_get(key, config.file)))


@cmd_config.group(name='profile')
def cmd_profile():
    """Manage config profiles"""


@cmd_profile.command(name='init')
@noninteractive_flag
@noop_flag
@click.argument('name', required=True)
def cmd_profile_init(name: str, non_interactive: bool = False, noop: bool = False):
    """Create or edit a profile"""
    profile = deepcopy(PROFILE_EMPTY)
    if name in config.file['profiles']:
        profile |= config.file['profiles'][name]

    if not non_interactive:
        profile, _changed = prompt_profile(name, create=True)

    config.update_profile(name, profile)
    if not noop:
        if not click.confirm(f'Do you want to save your changes to {config.runtime["config_file"]}?'):
            return
        config.write()
    else:
        logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')


# temporary demo
if __name__ == '__main__':
    print('vanilla:')
    print(toml.dumps(config.file))
    print('\n\n-----------------------------\n\n')

    try:
        config.try_load_file()
        config.enforce_config_loaded()
        conf = config.file
    except ConfigLoadException as ex:
        logging.fatal(str(ex))
        conf = deepcopy(CONFIG_DEFAULTS)
    conf['profiles']['pinephone'] = {
        'hostname': 'slowphone',
        'parent': '',
        'pkgs_include': ['zsh', 'tmux', 'mpv', 'firefox'],
        'pkgs_exclude': ['pixman-git'],
    }
    conf['profiles']['yeetphone'] = {
        'parent': 'pinephone',
        'hostname': 'yeetphone',
        'pkgs_include': ['pixman-git'],
        'pkgs_exclude': ['tmux'],
    }
    print(toml.dumps(conf))
```
config/__init__.py (new file; 265 lines):

```python
import click
import logging

from copy import deepcopy
from typing import Any, Optional, Union

from .scheme import Profile
from .profile import PROFILE_EMPTY, PROFILE_DEFAULTS
from .state import ConfigStateHolder, CONFIG_DEFAULTS, CONFIG_SECTIONS, merge_configs


def list_to_comma_str(str_list: list[str], default='') -> str:
    if str_list is None:
        return default
    return ','.join(str_list)


def comma_str_to_list(s: str, default=None) -> list[str]:
    if not s:
        return default
    return [a for a in s.split(',') if a]


def prompt_config(
    text: str,
    default: Any,
    field_type: type = str,
    bold: bool = True,
    echo_changes: bool = True,
) -> tuple[Any, bool]:
    """
    prompts for a new value for a config key. returns the result and a boolean that indicates
    whether the result is different, considering empty strings and None equal to each other.
    """

    original_default = default

    def true_or_zero(to_check) -> bool:
        """returns true if the value is truthy or int(0)"""
        zero = 0  # compiler complains about 'is with literal' otherwise
        return to_check or to_check is zero  # can't do == due to boolean<->int casting

    if type(None) == field_type:
        field_type = str

    if field_type == dict:
        raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap')
    elif field_type == list:
        default = list_to_comma_str(default)
        value_conv = comma_str_to_list
    else:
        value_conv = None
        default = '' if default is None else default

    if bold:
        text = click.style(text, bold=True)

    result = click.prompt(text, type=field_type, default=default, value_proc=value_conv, show_default=True)  # type: ignore
    changed = result != (original_default if field_type == list else default) and (true_or_zero(default) or true_or_zero(result))
    if changed and echo_changes:
        print(f'value changed: "{text}" = "{result}"')
    return result, changed


def prompt_profile(name: str, create: bool = True, defaults: Union[Profile, dict] = {}) -> tuple[Profile, bool]:
    """Prompts the user for every field in `defaults`. Set values to None for an empty profile."""

    profile: Any = PROFILE_EMPTY | defaults
    # don't use get_profile() here because we need the sparse profile
    if name in config.file['profiles']:
        profile |= config.file['profiles'][name]
    elif create:
        logging.info(f"Profile {name} doesn't exist yet, creating new profile.")
    else:
        raise Exception(f'Unknown profile "{name}"')
    logging.info(f'Configuring profile "{name}"')
    changed = False
    for key, current in profile.items():
        current = profile[key]
        text = f'{name}.{key}'
        result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key]))  # type: ignore
        if _changed:
            profile[key] = result
            changed = True
    return profile, changed


def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any:
    if not isinstance(config, dict):
        raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}")
    split_name = name.split('.')
    name = split_name[0]
    if name not in config:
        raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
    value = config[name]
    if len(split_name) == 1:
        return value
    else:
        rest_name = '.'.join(split_name[1:])
        return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')


def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
    split_name = name.split('.')
    if len(split_name) > 1:
        config = config_dot_name_get('.'.join(split_name[:-1]), config)
    config[split_name[-1]] = value


def prompt_for_save(retry_ctx: Optional[click.Context] = None):
    """
    Prompt whether to save the config file. If no is answered, `False` is returned.

    If `retry_ctx` is passed, the context's command will be reexecuted with the same arguments if the user chooses to retry.
    False will still be returned as the retry is expected to either save, perform another retry or arbort.
    """
    if click.confirm(f'Do you want to save your changes to {config.runtime["config_file"]}?', default=True):
        return True
    if retry_ctx:
        if click.confirm('Retry? ("n" to quit without saving)', default=True):
            retry_ctx.forward(retry_ctx.command)
    return False


config: ConfigStateHolder = ConfigStateHolder(file_conf_base=CONFIG_DEFAULTS)

config_option = click.option(
    '-C',
    '--config',
    'config_file',
    help='Override path to config file',
)


@click.group(name='config')
def cmd_config():
    """Manage the configuration and -profiles"""


noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)


@cmd_config.command(name='init')
@noninteractive_flag
@noop_flag
@click.option(
    '--sections',
    '-s',
    multiple=True,
    type=click.Choice(CONFIG_SECTIONS),
    default=CONFIG_SECTIONS,
    show_choices=True,
)
@click.pass_context
def cmd_config_init(ctx, sections: list[str] = CONFIG_SECTIONS, non_interactive: bool = False, noop: bool = False):
    """Initialize the config file"""
    if not non_interactive:
        results: dict[str, dict] = {}
        for section in sections:
            if section not in CONFIG_SECTIONS:
                raise Exception(f'Unknown section: {section}')
            if section == 'profiles':
                continue

            results[section] = {}
            for key, current in config.file[section].items():
                text = f'{section}.{key}'
                result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key]))
                if changed:
                    results[section][key] = result

        config.update(results)
        if 'profiles' in sections:
            current_profile = 'default' if 'current' not in config.file['profiles'] else config.file['profiles']['current']
            new_current, _ = prompt_config('profile.current', default=current_profile, field_type=str)
            profile, changed = prompt_profile(new_current, create=True)
            config.update_profile(new_current, profile)
        if not noop:
            if not prompt_for_save(ctx):
                return

    if not noop:
        config.write()
    else:
        logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')


@cmd_config.command(name='set')
@noninteractive_flag
@noop_flag
@click.argument('key_vals', nargs=-1)
@click.pass_context
def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop: bool = False):
    """
    Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers,
    like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively.
    """
    config.enforce_config_loaded()
    config_copy = deepcopy(config.file)
    for pair in key_vals:
        split_pair = pair.split('=')
        if len(split_pair) == 2:
            key: str = split_pair[0]
            value: Any = split_pair[1]
            value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
            if value_type != list:
                value = click.types.convert_type(value_type)(value)
            else:
                value = comma_str_to_list(value, default=[])
        elif len(split_pair) == 1 and not non_interactive:
            key = split_pair[0]
            value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
            current = config_dot_name_get(key, config.file)
            value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False)
        else:
            raise Exception(f'Invalid key=value pair "{pair}"')
        print('%s = %s' % (key, value))
        config_dot_name_set(key, value, config_copy)
        if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy:
            raise Exception('Config "{key}" = "{value}" failed to evaluate')
    if not noop:
        if not non_interactive and not prompt_for_save(ctx):
            return
        config.update(config_copy)
        config.write()


@cmd_config.command(name='get')
@click.argument('keys', nargs=-1)
def cmd_config_get(keys: list[str]):
    """Get config entries.
    Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`"""
    if len(keys) == 1:
        print(config_dot_name_get(keys[0], config.file))
        return
    for key in keys:
        print('%s = %s' % (key, config_dot_name_get(key, config.file)))


@cmd_config.group(name='profile')
def cmd_profile():
    """Manage config profiles"""


@cmd_profile.command(name='init')
@noninteractive_flag
@noop_flag
@click.argument('name', required=True)
@click.pass_context
def cmd_profile_init(ctx, name: str, non_interactive: bool = False, noop: bool = False):
    """Create or edit a profile"""
    profile = deepcopy(PROFILE_EMPTY)
    if name in config.file['profiles']:
        profile |= config.file['profiles'][name]

    if not non_interactive:
        profile, _changed = prompt_profile(name, create=True)

    config.update_profile(name, profile)
    if not noop:
        if not prompt_for_save(ctx):
            return
        config.write()
    else:
        logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')
```
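A small usage sketch of the dot-name helpers defined above, against a hypothetical config dict rather than a loaded config file:

```python
# Hypothetical nested config; config_dot_name_get/-set walk it by dot-separated keys.
conf = {'build': {'ccache': True, 'threads': 0}}

assert config_dot_name_get('build.ccache', conf) is True  # resolves conf['build']['ccache']

config_dot_name_set('build.threads', 4, conf)  # resolves 'build', then sets 'threads'
assert conf['build']['threads'] == 4
```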
86
config/profile.py
Normal file
86
config/profile.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import logging

from copy import deepcopy
from typing import Optional

from .scheme import Profile, SparseProfile

PROFILE_DEFAULTS_DICT = {
    'parent': '',
    'device': '',
    'flavour': '',
    'pkgs_include': [],
    'pkgs_exclude': [],
    'hostname': 'kupfer',
    'username': 'kupfer',
    'password': None,
    'size_extra_mb': "0",
}
PROFILE_DEFAULTS = Profile.fromDict(PROFILE_DEFAULTS_DICT)

PROFILE_EMPTY: Profile = {key: None for key in PROFILE_DEFAULTS.keys()}  # type: ignore


def resolve_profile(
    name: str,
    sparse_profiles: dict[str, SparseProfile],
    resolved: Optional[dict[str, Profile]] = None,
    _visited=None,
) -> dict[str, Profile]:
    """
    Recursively resolves the profile specified by `name` and its parents, merging the configs semantically
    and applying include and exclude overrides along the hierarchy.

    If `resolved` is passed `None`, a fresh dictionary will be created.
    `resolved` is modified in-place during parsing and also returned.
    A sanitized `sparse_profiles` dict is assumed; no checking for unknown keys or incorrect data types is performed.
    `_visited` should not be passed by users.
    """
    if _visited is None:
        _visited = list[str]()
    if resolved is None:
        resolved = dict[str, Profile]()
    if name in _visited:
        loop = list(_visited)
        raise Exception(f'Dependency loop detected in profiles: {" -> ".join(loop + [loop[0]])}')
    if name in resolved:
        return resolved

    logging.debug(f'Resolving profile {name}')
    _visited.append(name)
    sparse = sparse_profiles[name].copy()
    full = deepcopy(sparse)
    if name != 'default' and 'parent' not in sparse:
        sparse['parent'] = 'default'
    if 'parent' in sparse and (parent_name := sparse['parent']):
        parent = resolve_profile(name=parent_name, sparse_profiles=sparse_profiles, resolved=resolved, _visited=_visited)[parent_name]
        full = parent | sparse

        # add up size_extra_mb
        if 'size_extra_mb' in sparse:
            size = sparse['size_extra_mb']
            if isinstance(size, str) and size.startswith('+'):
                full['size_extra_mb'] = int(parent.get('size_extra_mb', 0)) + int(size.lstrip('+'))
            else:
                full['size_extra_mb'] = int(sparse['size_extra_mb'])

        # join our includes with the parent's
        includes = set(parent.get('pkgs_include', []) + sparse.get('pkgs_include', []))
        # our excludes override parent includes
        if 'pkgs_exclude' in sparse:
            includes -= set(sparse['pkgs_exclude'])
        full['pkgs_include'] = list(includes)

        # join our excludes with the parent's
        excludes = set(parent.get('pkgs_exclude', []) + sparse.get('pkgs_exclude', []))
        # our includes override parent excludes
        if 'pkgs_include' in sparse:
            excludes -= set(sparse['pkgs_include'])
        full['pkgs_exclude'] = list(excludes)

    # now init missing keys
    for key, value in PROFILE_DEFAULTS_DICT.items():
        if key not in full.keys():
            full[key] = value  # type: ignore[literal-required]
            if type(value) == list:
                full[key] = []  # type: ignore[literal-required]

    full['size_extra_mb'] = int(full['size_extra_mb'] or 0)

    resolved[name] = Profile.fromDict(full)
    return resolved
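As an illustration of the resolution logic above, a small sketch with made-up profiles (assuming this file is importable as `config.profile`); it shows parent merging and the `+`-prefixed `size_extra_mb` accumulation:

# Usage sketch for resolve_profile(); profile names and values are invented.
from config.profile import resolve_profile
from config.scheme import SparseProfile

sparse = {
    'default': SparseProfile.fromDict({'device': 'oneplus-enchilada', 'flavour': 'barebone',
                                       'pkgs_include': ['vim'], 'size_extra_mb': 800}),
    'graphical': SparseProfile.fromDict({'parent': 'default', 'flavour': 'phosh',
                                         'pkgs_include': ['firefox'], 'size_extra_mb': '+3000'}),
}
profile = resolve_profile('graphical', sparse)['graphical']
assert profile.size_extra_mb == 3800                    # 800 (parent) + 3000 ('+3000')
assert set(profile.pkgs_include) == {'vim', 'firefox'}  # parent includes are merged in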
config/scheme.py · Normal file · 217 lines
@@ -0,0 +1,217 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Optional, Union, Mapping, Any, get_type_hints, get_origin, get_args, Iterable
from munch import Munch

from constants import Arch


def munchclass(*args, init=False, **kwargs):
    return dataclass(*args, init=init, slots=True, **kwargs)


def resolve_type_hint(hint: type):
    origin = get_origin(hint)
    args: Iterable[type] = get_args(hint)
    if origin is Optional:
        args = set(list(args) + [type(None)])
    if origin in [Union, Optional]:
        results = []
        for arg in args:
            results += resolve_type_hint(arg)
        return results
    return [origin or hint]


class DataClass(Munch):

    def __init__(self, d: dict = {}, validate: bool = True, **kwargs):
        self.update(d | kwargs, validate=validate)

    @classmethod
    def transform(cls, values: Mapping[str, Any], validate: bool = True) -> Any:
        results = {}
        values = dict(values)
        for key in list(values.keys()):
            value = values.pop(key)
            type_hints = cls._type_hints
            if key in type_hints:
                _classes = tuple(resolve_type_hint(type_hints[key]))
                if issubclass(_classes[0], dict):
                    assert isinstance(value, dict)
                    target_class = _classes[0]
                    if not issubclass(_classes[0], Munch):
                        target_class = DataClass
                    if not isinstance(value, target_class):
                        value = target_class.fromDict(value, validate=validate)
                if validate:
                    if not isinstance(value, _classes):
                        raise Exception(f'key "{key}" has value of wrong type {_classes}: {value}')
            elif validate:
                raise Exception(f'Unknown key "{key}"')
            else:
                if isinstance(value, dict) and not isinstance(value, Munch):
                    value = Munch.fromDict(value)
            results[key] = value
        if values:
            if validate:
                raise Exception(f'values contained unknown keys: {list(values.keys())}')
            results |= values

        return results

    @classmethod
    def fromDict(cls, values: Mapping[str, Any], validate: bool = True):
        return cls(**cls.transform(values, validate))

    def update(self, d: Mapping[str, Any], validate: bool = True):
        Munch.update(self, type(self).transform(d, validate))

    def __init_subclass__(cls):
        super().__init_subclass__()
        cls._type_hints = get_type_hints(cls)

    def __repr__(self):
        return f'{type(self)}{dict.__repr__(self.toDict())}'


@munchclass()
class SparseProfile(DataClass):
    parent: Optional[str]
    device: Optional[str]
    flavour: Optional[str]
    pkgs_include: Optional[list[str]]
    pkgs_exclude: Optional[list[str]]
    hostname: Optional[str]
    username: Optional[str]
    password: Optional[str]
    size_extra_mb: Optional[Union[str, int]]

    def __repr__(self):
        return f'{type(self)}{dict.__repr__(self.toDict())}'


@munchclass()
class Profile(SparseProfile):
    parent: Optional[str]
    device: str
    flavour: str
    pkgs_include: list[str]
    pkgs_exclude: list[str]
    hostname: str
    username: str
    password: Optional[str]
    size_extra_mb: Union[str, int]


@munchclass()
class WrapperSection(DataClass):
    type: str  # NOTE: rename to 'wrapper_type' if this causes problems


@munchclass()
class BuildSection(DataClass):
    ccache: bool
    clean_mode: bool
    crosscompile: bool
    crossdirect: bool
    threads: int


@munchclass()
class PkgbuildsSection(DataClass):
    git_repo: str
    git_branch: str


@munchclass()
class PacmanSection(DataClass):
    parallel_downloads: int
    check_space: bool
    repo_branch: str


@munchclass()
class PathsSection(DataClass):
    cache_dir: str
    chroots: str
    pacman: str
    packages: str
    pkgbuilds: str
    jumpdrive: str
    images: str


class ProfilesSection(DataClass):
    current: str
    default: SparseProfile

    @classmethod
    def transform(cls, values: Mapping[str, Any], validate: bool = True):
        results = {}
        for k, v in values.items():
            if k == 'current':
                results[k] = v
                continue
            if not isinstance(v, dict):
                raise Exception(f'profile {v} is not a dict!')
            results[k] = SparseProfile.fromDict(v, validate=True)
        return results

    def update(self, d, validate: bool = True):
        Munch.update(self, self.transform(values=d, validate=validate))

    def __repr__(self):
        return f'{type(self)}{dict.__repr__(self.toDict())}'


@munchclass()
class Config(DataClass):
    wrapper: WrapperSection
    build: BuildSection
    pkgbuilds: PkgbuildsSection
    pacman: PacmanSection
    paths: PathsSection
    profiles: ProfilesSection

    @classmethod
    def fromDict(cls, values: Mapping[str, Any], validate: bool = True, allow_incomplete: bool = False):
        values = dict(values)  # copy for later modification
        _vals = {}
        for name, _class in cls._type_hints.items():
            if name not in values:
                if not allow_incomplete:
                    raise Exception(f'Config key "{name}" not in input dictionary')
                continue
            value = values.pop(name)
            if not isinstance(value, _class):
                value = _class.fromDict(value, validate=validate)
            _vals[name] = value

        if values:
            if validate:
                raise Exception(f'values contained unknown keys: {list(values.keys())}')
            _vals |= values

        return Config(**_vals, validate=validate)


@munchclass()
class RuntimeConfiguration(DataClass):
    verbose: bool
    config_file: Optional[str]
    arch: Optional[Arch]
    no_wrap: bool
    script_source_dir: str
    error_shell: bool


class ConfigLoadState(DataClass):
    load_finished: bool
    exception: Optional[Exception]

    def __init__(self, d: dict = {}):
        self.load_finished = False
        self.exception = None
        self.update(d)
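To make the `DataClass` mechanics concrete, a minimal sketch with an invented `ExampleSection` (not part of the codebase): subclasses declare type hints, `fromDict` validates values against them, and unknown keys are rejected when `validate=True`:

# Sketch of the DataClass validation shown above; ExampleSection and its
# fields are made up for illustration.
from typing import Optional
from config.scheme import DataClass, munchclass

@munchclass()
class ExampleSection(DataClass):
    name: str
    retries: Optional[int]

ok = ExampleSection.fromDict({'name': 'demo', 'retries': 3})
print(ok.name, ok.retries)  # Munch gives attribute access: demo 3

try:
    ExampleSection.fromDict({'name': 'demo', 'bogus': True})
except Exception as ex:
    print(ex)               # Unknown key "bogus"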
config/state.py · Normal file · 302 lines
@@ -0,0 +1,302 @@
import appdirs
import logging
import os
import toml

from copy import deepcopy
from typing import Mapping, Optional

from constants import DEFAULT_PACKAGE_BRANCH

from .scheme import Config, ConfigLoadState, DataClass, Profile, RuntimeConfiguration
from .profile import PROFILE_DEFAULTS, PROFILE_DEFAULTS_DICT, resolve_profile

CONFIG_DIR = appdirs.user_config_dir('kupfer')
CACHE_DIR = appdirs.user_cache_dir('kupfer')
CONFIG_DEFAULT_PATH = os.path.join(CONFIG_DIR, 'kupferbootstrap.toml')

CONFIG_DEFAULTS_DICT = {
    'wrapper': {
        'type': 'docker',
    },
    'build': {
        'ccache': True,
        'clean_mode': True,
        'crosscompile': True,
        'crossdirect': True,
        'threads': 0,
    },
    'pkgbuilds': {
        'git_repo': 'https://gitlab.com/kupfer/packages/pkgbuilds.git',
        'git_branch': DEFAULT_PACKAGE_BRANCH,
    },
    'pacman': {
        'parallel_downloads': 4,
        'check_space': False,  # TODO: investigate why True causes issues
        'repo_branch': DEFAULT_PACKAGE_BRANCH,
    },
    'paths': {
        'cache_dir': CACHE_DIR,
        'chroots': os.path.join('%cache_dir%', 'chroots'),
        'pacman': os.path.join('%cache_dir%', 'pacman'),
        'packages': os.path.join('%cache_dir%', 'packages'),
        'pkgbuilds': os.path.join('%cache_dir%', 'pkgbuilds'),
        'jumpdrive': os.path.join('%cache_dir%', 'jumpdrive'),
        'images': os.path.join('%cache_dir%', 'images'),
    },
    'profiles': {
        'current': 'default',
        'default': deepcopy(PROFILE_DEFAULTS_DICT),
    },
}
CONFIG_DEFAULTS: Config = Config.fromDict(CONFIG_DEFAULTS_DICT)
CONFIG_SECTIONS = list(CONFIG_DEFAULTS.keys())

CONFIG_RUNTIME_DEFAULTS: RuntimeConfiguration = RuntimeConfiguration.fromDict({
    'verbose': False,
    'config_file': None,
    'arch': None,
    'no_wrap': False,
    'script_source_dir': os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'error_shell': False,
})


def resolve_path_template(path_template: str, paths: dict[str, str]) -> str:
    terminator = '%'  # i'll be back
    result = path_template
    for path_name, path in paths.items():
        result = result.replace(terminator + path_name + terminator, path)
    return result


def sanitize_config(conf: dict[str, dict], warn_missing_defaultprofile=True) -> dict[str, dict]:
    """Checks the input config dict for unknown keys and returns only the known parts."""
    return merge_configs(conf_new=conf, conf_base={}, warn_missing_defaultprofile=warn_missing_defaultprofile)


def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defaultprofile=True) -> dict[str, dict]:
    """
    Returns `conf_new` semantically merged into `conf_base`, after validating
    `conf_new` keys against `CONFIG_DEFAULTS` and `PROFILE_DEFAULTS`.
    Pass `conf_base={}` to get a sanitized version of `conf_new`.
    NOTE: `conf_base` is NOT checked for invalid keys. Sanitize beforehand.
    """
    parsed = deepcopy(conf_base)

    for outer_name, outer_conf in deepcopy(conf_new).items():
        # only handle known config sections
        if outer_name not in CONFIG_SECTIONS:
            logging.warning(f'Skipped unknown config section "{outer_name}"')
            continue
        logging.debug(f'Parsing config section "{outer_name}"')
        # check if outer_conf is a dict
        if not isinstance(outer_conf, (dict, DataClass)):
            parsed[outer_name] = outer_conf
        else:
            # init section
            if outer_name not in parsed:
                parsed[outer_name] = {}

            # profiles need special handling:
            # 1. profile names are unknown keys by definition, but we want 'default' to exist
            # 2. a profile's subkeys must be compared against PROFILE_DEFAULTS.keys()
            if outer_name == 'profiles':
                if warn_missing_defaultprofile and 'default' not in outer_conf.keys():
                    logging.warning('Default profile is not defined in config file')

                update = dict[str, dict]()
                for profile_name, profile_conf in outer_conf.items():
                    if not isinstance(profile_conf, (dict, Profile)):
                        if profile_name == 'current':
                            parsed[outer_name][profile_name] = profile_conf
                        else:
                            logging.warning(f'Skipped key "{profile_name}" in profile section: only subsections and "current" allowed')
                        continue

                    # init profile
                    if profile_name in parsed[outer_name]:
                        profile = parsed[outer_name][profile_name]
                    else:
                        profile = {}

                    for key, val in profile_conf.items():
                        if key not in PROFILE_DEFAULTS:
                            logging.warning(f'Skipped unknown config item "{key}" in profile "{profile_name}"')
                            continue
                        profile[key] = val
                    update |= {profile_name: profile}
                parsed[outer_name].update(update)

            else:
                # handle generic inner config dict
                for inner_name, inner_conf in outer_conf.items():
                    if inner_name not in CONFIG_DEFAULTS[outer_name].keys():
                        logging.warning(f'Skipped unknown config item "{inner_name}" in "{outer_name}"')
                        continue
                    parsed[outer_name][inner_name] = inner_conf

    return parsed


def dump_toml(conf) -> str:
    return toml.dumps(conf)


def dump_file(file_path: str, config: dict, file_mode: int = 0o600):

    def _opener(path, flags):
        return os.open(path, flags, file_mode)

    conf_dir = os.path.dirname(file_path)
    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)
    old_umask = os.umask(0)
    with open(file_path, 'w', opener=_opener) as f:
        f.write(dump_toml(conf=config))
    os.umask(old_umask)


def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict:
    """
    Parse the toml contents of `config_file`, validating keys against `CONFIG_DEFAULTS`.
    The parsed results are semantically merged into `base` before returning.
    `base` itself is NOT checked for invalid keys.
    """
    _conf_file = config_file if config_file is not None else CONFIG_DEFAULT_PATH
    logging.debug(f'Trying to load config file: {_conf_file}')
    loaded_conf = toml.load(_conf_file)
    return merge_configs(conf_new=loaded_conf, conf_base=base)


class ConfigLoadException(Exception):
    inner = None

    def __init__(self, extra_msg='', inner_exception: Optional[Exception] = None):
        msg: list[str] = ['Config load failed!']
        if extra_msg:
            msg.append(extra_msg)
        if inner_exception:
            self.inner = inner_exception
            msg.append(str(inner_exception))
        super().__init__(' '.join(msg))


class ConfigStateHolder:
    # config options that are persisted to file
    file: Config
    # runtime config not persisted anywhere
    runtime: RuntimeConfiguration
    file_state: ConfigLoadState
    _profile_cache: Optional[dict[str, Profile]]

    def __init__(self, file_conf_path: Optional[str] = None, runtime_conf={}, file_conf_base: dict = {}):
        """Init a stateholder, optionally loading `file_conf_path`."""
        self.file = Config.fromDict(merge_configs(conf_new=file_conf_base, conf_base=CONFIG_DEFAULTS))
        self.file_state = ConfigLoadState()
        self.runtime = RuntimeConfiguration.fromDict(CONFIG_RUNTIME_DEFAULTS | runtime_conf)
        self.runtime['arch'] = os.uname().machine
        self._profile_cache = {}
        if file_conf_path:
            self.try_load_file(file_conf_path)

    def try_load_file(self, config_file=None, base=CONFIG_DEFAULTS):
        config_file = config_file or CONFIG_DEFAULT_PATH
        self.runtime['config_file'] = config_file
        self._profile_cache = None
        try:
            self.file = parse_file(config_file=config_file, base=base)
        except Exception as ex:
            self.file_state.exception = ex
        self.file_state.load_finished = True

    def is_loaded(self) -> bool:
        """Returns True if a file was **successfully** loaded."""
        return self.file_state.load_finished and self.file_state.exception is None

    def enforce_config_loaded(self):
        if not self.file_state.load_finished:
            m = "Config file wasn't even parsed yet. This is probably a bug in kupferbootstrap :O"
            raise ConfigLoadException(Exception(m))
        ex = self.file_state.exception
        if ex:
            if type(ex) == FileNotFoundError:
                ex = Exception("Config file doesn't exist. Try running `kupferbootstrap config init` first?")
            raise ex

    def get_profile(self, name: Optional[str] = None) -> Profile:
        name = name or self.file['profiles']['current']
        self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file['profiles'], resolved=self._profile_cache)
        return self._profile_cache[name]

    def enforce_profile_device_set(self, profile_name: Optional[str] = None, hint_or_set_arch: bool = False) -> Profile:
        arch_hint = ''
        if not hint_or_set_arch:
            self.enforce_config_loaded()
        else:
            arch_hint = (' or specify the target architecture by passing `--arch` to the current command,\n'
                         'e.g. `kupferbootstrap packages build --arch x86_64`')
            if not self.is_loaded():
                if not self.file_state.exception:
                    raise Exception('Error enforcing config profile device: config had not even been loaded yet.\n'
                                    'This is a bug in kupferbootstrap!')
                raise Exception("Profile device couldn't be resolved because the config file couldn't be loaded.\n"
                                "If the config doesn't exist, try running `kupferbootstrap config init`.\n"
                                f"Error: {self.file_state.exception}")
        if profile_name and profile_name not in self.file.profiles:
            raise Exception(f'Unknown profile "{profile_name}". Please run `kupferbootstrap config profile init`{arch_hint}')
        profile = self.get_profile(profile_name)
        if not profile.device:
            m = (f'Profile "{profile_name}" has no device configured.\n'
                 f'Please run `kupferbootstrap config profile init device`{arch_hint}')
            raise Exception(m)
        return profile

    def get_path(self, path_name: str) -> str:
        paths = self.file['paths']
        return resolve_path_template(paths[path_name], paths)

    def get_package_dir(self, arch: str):
        return os.path.join(self.get_path('packages'), arch)

    def dump(self) -> str:
        """Dump the toml representation of `self.file`."""
        return dump_toml(self.file)

    def write(self, path=None):
        """Write the toml representation of `self.file` to `path`."""
        if path is None:
            path = self.runtime['config_file']
        os.makedirs(os.path.dirname(path), exist_ok=True)
        dump_file(path, self.file)
        logging.info(f'Created config file at {path}')

    def invalidate_profile_cache(self):
        """Clear the profile cache (usually after modification)."""
        self._profile_cache = None

    def update(self, config_fragment: dict[str, dict], warn_missing_defaultprofile: bool = True) -> bool:
        """Update `self.file` with `config_fragment`. Returns `True` if the config was changed."""
        merged = merge_configs(config_fragment, conf_base=self.file, warn_missing_defaultprofile=warn_missing_defaultprofile)
        changed = self.file != merged
        self.file.update(merged)
        if changed and 'profiles' in config_fragment and self.file['profiles'] != config_fragment['profiles']:
            self.invalidate_profile_cache()
        return changed

    def update_profile(self, name: str, profile: Profile, merge: bool = False, create: bool = True, prune: bool = True):
        new = {}
        if name not in self.file['profiles']:
            if not create:
                raise Exception(f'Unknown profile: {name}')
        else:
            if merge:
                new = deepcopy(self.file['profiles'][name])

        logging.debug(f'new: {new}')
        logging.debug(f'profile: {profile}')
        new |= profile

        if prune:
            new = {key: val for key, val in new.items() if val is not None}
        self.file['profiles'][name] = new
        self.invalidate_profile_cache()
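A quick sketch of the `%cache_dir%` expansion performed by `resolve_path_template()` above (the example paths are made up):

# '%cache_dir%' expansion as implemented by resolve_path_template().
from config.state import resolve_path_template

paths = {
    'cache_dir': '/home/user/.cache/kupfer',
    'chroots': '%cache_dir%/chroots',
}
print(resolve_path_template(paths['chroots'], paths))
# /home/user/.cache/kupfer/chroots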
config/test_config.py · Normal file · 216 lines
@@ -0,0 +1,216 @@
import pytest

import os
import pickle
import toml

from tempfile import mktemp, gettempdir as get_system_tempdir
from typing import Optional

from config.profile import PROFILE_DEFAULTS
from config.scheme import Config, Profile
from config.state import CONFIG_DEFAULTS, ConfigStateHolder


def get_filename():
    return mktemp() + '_pytest.toml'


@pytest.fixture
def conf_filename():
    f = get_filename()
    yield f


@pytest.fixture
def empty_config():
    f = get_filename()
    with open(f, 'w') as fd:
        fd.write('')
    yield f
    os.unlink(f)


@pytest.fixture
def configstate_nonexistant(conf_filename):
    return ConfigStateHolder(conf_filename)


@pytest.fixture
def configstate_emptyfile(empty_config):
    return ConfigStateHolder(empty_config)


def validate_ConfigStateHolder(c: ConfigStateHolder, should_load: Optional[bool] = None):
    assert isinstance(c, ConfigStateHolder)
    if should_load is not None:
        assert c.file_state.load_finished is True
        assert c.is_loaded() == should_load
    assert c.file


@pytest.mark.parametrize('conf_fixture,exists', [('configstate_emptyfile', True), ('configstate_nonexistant', False)])
def test_fixture_configstate(conf_fixture: str, exists: bool, request):
    configstate = request.getfixturevalue(conf_fixture)
    assert 'config_file' in configstate.runtime
    confpath = configstate.runtime['config_file']
    assert isinstance(confpath, str)
    assert confpath
    assert exists == os.path.exists(confpath)
    assert confpath.startswith(get_system_tempdir())


def test_config_load_emptyfile(configstate_emptyfile):
    validate_ConfigStateHolder(configstate_emptyfile, should_load=True)


def test_config_load_nonexistant(configstate_nonexistant):
    validate_ConfigStateHolder(configstate_nonexistant, should_load=False)


@pytest.mark.parametrize('path_fixture,should_load', [('conf_filename', False), ('empty_config', True)])
def test_loadstate_is_loaded(path_fixture: str, should_load: bool, request: pytest.FixtureRequest):
    path = request.getfixturevalue(path_fixture)
    assert os.path.exists(path) == should_load
    c = ConfigStateHolder(path)
    validate_ConfigStateHolder(c, should_load)
    assert c.file_state.load_finished is True
    assert (c.file_state.exception is None) == should_load
    assert c.is_loaded() == should_load


@pytest.mark.parametrize('conf_fixture', ['configstate_emptyfile', 'configstate_nonexistant'])
def test_config_fills_defaults(conf_fixture: str, request):
    c = request.getfixturevalue(conf_fixture)
    assert c.file == CONFIG_DEFAULTS


def dict_filter_out_None(d: dict):
    return {k: v for k, v in d.items() if v is not None}


def compare_to_defaults(config: dict, defaults: dict = CONFIG_DEFAULTS, filter_None_from_defaults: Optional[bool] = None):
    if filter_None_from_defaults is None:
        filter_None_from_defaults = not isinstance(config, Config)
    # assert sections match
    assert config.keys() == defaults.keys()
    for section, section_defaults in defaults.items():
        assert section in config
        assert isinstance(section_defaults, dict)
        # filter out None values from defaults - they're not written unless set
        if filter_None_from_defaults:
            section_defaults = dict_filter_out_None(section_defaults)
        section_values_config = config[section]
        if section != 'profiles':
            assert section_values_config == section_defaults
        else:
            CURRENT_KEY = 'current'
            assert CURRENT_KEY in section_defaults.keys()
            assert section_defaults.keys() == section_values_config.keys()
            assert section_defaults[CURRENT_KEY] == section_values_config[CURRENT_KEY]
            for profile_name, profile in section_defaults.items():
                if profile_name == CURRENT_KEY:
                    continue  # not a profile
                if filter_None_from_defaults:
                    profile = dict_filter_out_None(profile)
                assert profile == section_values_config[profile_name]


def load_toml_file(path) -> dict:
    with open(path, 'r') as f:
        text = f.read()
    assert text
    return toml.loads(text)


def get_path_from_stateholder(c: ConfigStateHolder):
    return c.runtime['config_file']


def test_config_save_nonexistant(configstate_nonexistant: ConfigStateHolder):
    c = configstate_nonexistant
    confpath = c.runtime['config_file']
    assert not os.path.exists(confpath)
    c.write()
    assert confpath
    assert os.path.exists(confpath)
    loaded = load_toml_file(confpath)
    assert loaded
    # sadly we can't just assert `loaded == CONFIG_DEFAULTS` due to `None` values
    compare_to_defaults(loaded)


def test_config_save_modified(configstate_emptyfile: ConfigStateHolder):
    c = configstate_emptyfile
    WRAPPER_KEY = 'wrapper'
    TYPE_KEY = 'type'
    assert WRAPPER_KEY in c.file
    assert TYPE_KEY in c.file[WRAPPER_KEY]
    wrapper_section = CONFIG_DEFAULTS[WRAPPER_KEY] | {TYPE_KEY: 'none'}
    c.file[WRAPPER_KEY] |= wrapper_section
    c.write()
    defaults_modified = CONFIG_DEFAULTS | {WRAPPER_KEY: wrapper_section}
    compare_to_defaults(load_toml_file(get_path_from_stateholder(c)), defaults_modified)


def test_config_scheme_defaults():
    c = Config.fromDict(CONFIG_DEFAULTS, validate=True, allow_incomplete=False)
    assert c
    compare_to_defaults(c)


def test_config_scheme_modified():
    modifications = {'wrapper': {'type': 'none'}, 'build': {'crossdirect': False}}
    assert set(modifications.keys()).issubset(CONFIG_DEFAULTS.keys())
    d = {section_name: (section | modifications.get(section_name, {})) for section_name, section in CONFIG_DEFAULTS.items()}
    c = Config.fromDict(d, validate=True, allow_incomplete=False)
    assert c
    assert c.build.crossdirect is False
    assert c.wrapper.type == 'none'


def test_configstate_profile_pickle():
    c = ConfigStateHolder()
    assert c.file.wrapper
    assert c.file.profiles
    # add a new profile to check it doesn't error out due to unknown keys
    c.file.profiles['graphical'] = {'username': 'kupfer123', 'hostname': 'test123'}
    p = pickle.dumps(c)
    unpickled = pickle.loads(p)
    assert c.file == unpickled.file


def test_profile():
    p = None
    p = Profile.fromDict(PROFILE_DEFAULTS)
    assert p is not None
    assert isinstance(p, Profile)


def test_get_profile():
    c = ConfigStateHolder()
    d = {'username': 'kupfer123', 'hostname': 'test123'}
    c.file.profiles['testprofile'] = d
    p = c.get_profile('testprofile')
    assert p
    assert isinstance(p, Profile)


def test_get_profile_from_disk(configstate_emptyfile):
    profile_name = 'testprofile'
    device = 'sdm845-oneplus-enchilada'
    c = configstate_emptyfile
    c.file.profiles.default.device = device
    d = {'parent': 'default', 'username': 'kupfer123', 'hostname': 'test123'}
    c.file.profiles[profile_name] = d
    filepath = c.runtime.config_file
    assert filepath
    c.write()
    del c
    c = ConfigStateHolder(filepath)
    c.try_load_file(filepath)
    c.enforce_config_loaded()
    p: Profile = c.get_profile(profile_name)
    assert isinstance(p, Profile)
    assert 'device' in p
    assert p.device == device
constants.py · 55 lines
@@ -17,6 +17,7 @@ JUMPDRIVE_VERSION = '0.8'

 BOOT_STRATEGIES: dict[str, str] = {
     'oneplus-enchilada': FASTBOOT,
     'oneplus-fajita': FASTBOOT,
     'xiaomi-beryllium-ebbg': FASTBOOT,
     'xiaomi-beryllium-tianma': FASTBOOT,
     'bq-paella': FASTBOOT,
@@ -24,6 +25,7 @@ BOOT_STRATEGIES: dict[str, str] = {

 DEVICES: dict[str, list[str]] = {
     'oneplus-enchilada': ['device-sdm845-oneplus-enchilada'],
     'oneplus-fajita': ['device-sdm845-oneplus-fajita'],
     'xiaomi-beryllium-ebbg': ['device-sdm845-xiaomi-beryllium-ebbg'],
     'xiaomi-beryllium-tianma': ['device-sdm845-xiaomi-beryllium-tianma'],
     'bq-paella': ['device-msm8916-bq-paella'],
@@ -89,11 +91,20 @@ Arch: TypeAlias = str
 ARCHES = [
     'x86_64',
     'aarch64',
+    'armv7h',
 ]

 DistroArch: TypeAlias = Arch
 TargetArch: TypeAlias = Arch

+ALARM_REPOS = {
+    'core': 'http://mirror.archlinuxarm.org/$arch/$repo',
+    'extra': 'http://mirror.archlinuxarm.org/$arch/$repo',
+    'community': 'http://mirror.archlinuxarm.org/$arch/$repo',
+    'alarm': 'http://mirror.archlinuxarm.org/$arch/$repo',
+    'aur': 'http://mirror.archlinuxarm.org/$arch/$repo',
+}
+
 BASE_DISTROS: dict[DistroArch, dict[str, dict[str, str]]] = {
     'x86_64': {
         'repos': {
@@ -103,42 +114,58 @@ BASE_DISTROS: dict[DistroArch, dict[str, dict[str, str]]] = {
         },
     },
     'aarch64': {
-        'repos': {
-            'core': 'http://mirror.archlinuxarm.org/$arch/$repo',
-            'extra': 'http://mirror.archlinuxarm.org/$arch/$repo',
-            'community': 'http://mirror.archlinuxarm.org/$arch/$repo',
-            'alarm': 'http://mirror.archlinuxarm.org/$arch/$repo',
-            'aur': 'http://mirror.archlinuxarm.org/$arch/$repo',
-        },
+        'repos': ALARM_REPOS,
     },
+    'armv7h': {
+        'repos': ALARM_REPOS,
+    },
 }

 COMPILE_ARCHES: dict[Arch, str] = {
     'x86_64': 'amd64',
     'aarch64': 'arm64',
+    'armv7h': 'arm',
 }

 GCC_HOSTSPECS: dict[DistroArch, dict[TargetArch, str]] = {
     'x86_64': {
         'x86_64': 'x86_64-pc-linux-gnu',
         'aarch64': 'aarch64-linux-gnu',
+        'armv7h': 'arm-unknown-linux-gnueabihf'
     },
     'aarch64': {
         'aarch64': 'aarch64-unknown-linux-gnu',
-    }
+    },
+    'armv7h': {
+        'armv7h': 'armv7l-unknown-linux-gnueabihf'
+    },
 }

 CFLAGS_GENERAL = ['-O2', '-pipe', '-fstack-protector-strong']
+CFLAGS_ALARM = [
+    '-fno-plt',
+    '-fexceptions',
+    '-Wp,-D_FORTIFY_SOURCE=2',
+    '-Wformat',
+    '-Werror=format-security',
+    '-fstack-clash-protection',
+]
 CFLAGS_ARCHES: dict[Arch, list[str]] = {
     'x86_64': ['-march=x86-64', '-mtune=generic'],
     'aarch64': [
         '-march=armv8-a',
-        '-fexceptions',
-        '-Wp,-D_FORTIFY_SOURCE=2',
-        '-Wformat',
-        '-Werror=format-security',
-        '-fstack-clash-protection',
-    ]
+    ] + CFLAGS_ALARM,
+    'armv7h': [
+        '-march=armv7-a',
+        '-mfloat-abi=hard',
+        '-mfpu=neon',
+    ] + CFLAGS_ALARM,
 }

 QEMU_ARCHES: dict[Arch, str] = {
     'x86_64': 'x86_64',
     'aarch64': 'aarch64',
+    'armv7h': 'arm',
 }

 QEMU_BINFMT_PKGS = ['qemu-user-static-bin', 'binfmt-qemu-static']
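For orientation, a lookup sketch against the `GCC_HOSTSPECS` table above:

# Hostspec lookup per GCC_HOSTSPECS: an x86_64 build host targeting the newly
# added armv7h arch selects the arm-unknown-linux-gnueabihf toolchain prefix.
host_arch, target_arch = 'x86_64', 'armv7h'
print(GCC_HOSTSPECS[host_arch][target_arch])  # arm-unknown-linux-gnueabihf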
@@ -1,6 +1,6 @@
 from typing import Optional, Mapping

-from constants import ARCHES, BASE_DISTROS, REPOSITORIES, KUPFER_HTTPS, CHROOT_PATHS
+from constants import Arch, ARCHES, BASE_DISTROS, REPOSITORIES, KUPFER_HTTPS, CHROOT_PATHS
 from generator import generate_pacman_conf_body
 from config import config

@@ -12,7 +12,7 @@ class Distro:
     repos: Mapping[str, Repo]
     arch: str

-    def __init__(self, arch: str, repo_infos: dict[str, RepoInfo], scan=False):
+    def __init__(self, arch: Arch, repo_infos: dict[str, RepoInfo], scan=False):
         assert (arch in ARCHES)
         self.arch = arch
         self.repos = dict[str, Repo]()
@@ -25,41 +25,71 @@ class Distro:
             scan=scan,
         )

-    def get_packages(self):
+    def get_packages(self) -> dict[str, PackageInfo]:
         """get packages from all repos, semantically overlaying them"""
         results = dict[str, PackageInfo]()
-        for repo in self.repos.values().reverse():
-            assert (repo.packages is not None)
-            for package in repo.packages:
-                results[package.name] = package
+        for repo in list(self.repos.values())[::-1]:
+            assert repo.packages is not None
+            results.update(repo.packages)
         return results

     def repos_config_snippet(self, extra_repos: Mapping[str, RepoInfo] = {}) -> str:
         extras = [Repo(name, url_template=info.url_template, arch=self.arch, options=info.options, scan=False) for name, info in extra_repos.items()]
         return '\n\n'.join(repo.config_snippet() for repo in (extras + list(self.repos.values())))

-    def get_pacman_conf(self, extra_repos: Mapping[str, RepoInfo] = {}, check_space: bool = True):
+    def get_pacman_conf(self, extra_repos: Mapping[str, RepoInfo] = {}, check_space: bool = True, in_chroot: bool = True):
         body = generate_pacman_conf_body(self.arch, check_space=check_space)
         return body + self.repos_config_snippet(extra_repos)

+    def scan(self, lazy=True):
+        for repo in self.repos.values():
+            if not (lazy and repo.scanned):
+                repo.scan()
+
+    def is_scanned(self):
+        for repo in self.repos.values():
+            if not repo.scanned:
+                return False
+        return True
+

 def get_base_distro(arch: str) -> Distro:
     repos = {name: RepoInfo(url_template=url) for name, url in BASE_DISTROS[arch]['repos'].items()}
     return Distro(arch=arch, repo_infos=repos, scan=False)


-def get_kupfer(arch: str, url_template: str) -> Distro:
+def get_kupfer(arch: str, url_template: str, scan: bool = False) -> Distro:
     repos = {name: RepoInfo(url_template=url_template, options={'SigLevel': 'Never'}) for name in REPOSITORIES}
     return Distro(
         arch=arch,
         repo_infos=repos,
         scan=scan,
     )


-def get_kupfer_https(arch: str) -> Distro:
-    return get_kupfer(arch, KUPFER_HTTPS.replace('%branch%', config.file['pacman']['repo_branch']))
+_kupfer_https = dict[Arch, Distro]()
+_kupfer_local = dict[Arch, Distro]()
+_kupfer_local_chroots = dict[Arch, Distro]()
+
+
+def get_kupfer_https(arch: Arch, scan: bool = False) -> Distro:
+    global _kupfer_https
+    if arch not in _kupfer_https or not _kupfer_https[arch]:
+        _kupfer_https[arch] = get_kupfer(arch, KUPFER_HTTPS.replace('%branch%', config.file['pacman']['repo_branch']), scan)
+    item = _kupfer_https[arch]
+    if scan and not item.is_scanned():
+        item.scan()
+    return item


-def get_kupfer_local(arch: Optional[str] = None, in_chroot: bool = True) -> Distro:
+def get_kupfer_local(arch: Optional[Arch] = None, in_chroot: bool = True, scan: bool = False) -> Distro:
+    global _kupfer_local, _kupfer_local_chroots
+    cache = _kupfer_local_chroots if in_chroot else _kupfer_local
     arch = arch or config.runtime['arch']
-    dir = CHROOT_PATHS['packages'] if in_chroot else config.get_path('packages')
-    return get_kupfer(arch, f"file://{dir}/$arch/$repo")
+    if arch not in cache or not cache[arch]:
+        dir = CHROOT_PATHS['packages'] if in_chroot else config.get_path('packages')
+        cache[arch] = get_kupfer(arch, f"file://{dir}/$arch/$repo")
+    item = cache[arch]
+    if scan and not item.is_scanned():
+        item.scan()
+    return item
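A behavioral sketch of the per-arch memoization introduced above (assuming the module's functions are in scope and the kupfer repos are reachable):

# Repeated calls return the same cached Distro; scan=True triggers a lazy,
# one-time repo scan on the cached instance.
d1 = get_kupfer_https('aarch64')             # constructed once, then cached
d2 = get_kupfer_https('aarch64')             # cache hit
assert d1 is d2
d3 = get_kupfer_https('aarch64', scan=True)  # same instance, scanned on demand
assert d3 is d1 and d3.is_scanned()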
@@ -30,4 +30,4 @@ class PackageInfo:
         desc = {}
         for key, value in zip(pruned_lines[0::2], pruned_lines[1::2]):
             desc[key.strip()] = value.strip()
-        return PackageInfo(desc['NAME'], desc['VERSION'], desc['FILENAME'], resolved_url=resolved_url)
+        return PackageInfo(desc['NAME'], desc['VERSION'], desc['FILENAME'], resolved_url='/'.join([resolved_url, desc['FILENAME']]))
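The one-line fix above makes `resolved_url` point at the package file itself rather than at the repo directory; with made-up values:

# Illustration of the URL join performed by the fix (repo URL and FILENAME
# are made up):
resolved_url = 'http://mirror.example/aarch64/main'
filename = 'foo-1.0-1-aarch64.pkg.tar.zst'
print('/'.join([resolved_url, filename]))
# http://mirror.example/aarch64/main/foo-1.0-1-aarch64.pkg.tar.zst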
@@ -5,14 +5,12 @@ import tarfile
 import tempfile
 import urllib.request

-from config import config
-
 from .package import PackageInfo


 def resolve_url(url_template, repo_name: str, arch: str):
     result = url_template
-    for template, replacement in {'$repo': repo_name, '$arch': config.runtime['arch']}.items():
+    for template, replacement in {'$repo': repo_name, '$arch': arch}.items():
         result = result.replace(template, replacement)
     return result
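With the corrected loop, `resolve_url()` substitutes the arch it is given instead of the host arch from `config.runtime`; for example:

# '$arch' and '$repo' placeholder substitution per resolve_url() above:
url = resolve_url('http://mirror.archlinuxarm.org/$arch/$repo', repo_name='core', arch='armv7h')
print(url)  # http://mirror.archlinuxarm.org/armv7h/core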
@@ -34,13 +32,16 @@ class Repo(RepoInfo):
     remote: bool
     scanned: bool = False

+    def resolve_url(self) -> str:
+        return resolve_url(self.url_template, repo_name=self.name, arch=self.arch)
+
     def scan(self):
-        self.resolved_url = resolve_url(self.url_template, repo_name=self.name, arch=self.arch)
+        self.resolved_url = self.resolve_url()
         self.remote = not self.resolved_url.startswith('file://')
         uri = f'{self.resolved_url}/{self.name}.db'
         path = ''
         if self.remote:
-            logging.debug(f'Downloading repo file from {uri}')
+            logging.info(f'Downloading repo file from {uri}')
             with urllib.request.urlopen(uri) as request:
                 fd, path = tempfile.mkstemp()
                 with open(fd, 'wb') as writable:
@@ -66,6 +67,9 @@ class Repo(RepoInfo):
         if scan:
             self.scan()

+    def __repr__(self):
+        return f'<Repo:{self.name}:{self.arch}:{self.url_template}>'
+
     def config_snippet(self) -> str:
         options = {'Server': self.url_template} | self.options
         return ('[%s]\n' % self.name) + '\n'.join([f"{key} = {value}" for key, value in options.items()])
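For reference, a sketch of what `config_snippet()` renders (repo name and URL are made up):

# pacman.conf section rendering per config_snippet(): '[name]' header
# followed by 'key = value' option lines.
options = {'Server': 'file:///packages/$arch/$repo', 'SigLevel': 'Never'}
snippet = '[%s]\n' % 'kupfer' + '\n'.join(f"{key} = {value}" for key, value in options.items())
print(snippet)
# [kupfer]
# Server = file:///packages/$arch/$repo
# SigLevel = Never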
docs/.gitignore · vendored · Normal file · 4 lines
@@ -0,0 +1,4 @@
.buildinfo
.doctrees
html
source/cli
docs/Makefile · Normal file · 16 lines
@@ -0,0 +1,16 @@
buildargs := -b dirhtml -aE source html

.PHONY: cleanbuild clean

cleanbuild:
	@make clean
	@make html

clean:
	rm -rf html source/cli

html:
	sphinx-build $(buildargs)

serve: html
	(cd html && python -m http.server 9999)
docs/requirements.txt · Normal file · 3 lines
@@ -0,0 +1,3 @@
sphinx-click
# furo sphinx theme
furo
docs/source/cli.rst · Normal file · 17 lines
@@ -0,0 +1,17 @@
#############
CLI Interface
#############

.. click:: main:cli
   :nested: none
   :prog: kupferbootstrap


Commands
========

.. generated by cmd.rst
.. toctree::
   :glob:

   cli/*
docs/source/cmd.rst · Normal file · 21 lines
@@ -0,0 +1,21 @@
:orphan:
:nosearch:

only used to trigger builds of the submodule docs!

.. autosummary::
   :toctree: cli
   :template: command.rst
   :recursive:

   boot
   cache
   chroot
   config
   flash
   forwarding
   image
   packages
   ssh
   telnet
docs/source/conf.py · Normal file · 21 lines
@@ -0,0 +1,21 @@
import os
import sys

sys.path.insert(0, os.path.abspath('../..'))
extensions = [
    'sphinx_click',
    'sphinx.ext.autosummary',  # create neat summary tables
]
templates_path = ['templates']
project = 'Kupfer👢strap'
html_title = 'Kupferbootstrap'
html_theme = 'furo'
html_static_path = ['static']
html_css_files = ['kupfer_docs.css']
html_favicon = 'static/kupfer-white-filled.svg'
html_theme_options = {
    "globaltoc_maxdepth": 5,
    "globaltoc_collapse": True,
    "light_logo": "kupfer-black-transparent.svg",
    "dark_logo": "kupfer-white-transparent.svg",
}
docs/source/config.rst · Normal file · 134 lines
@@ -0,0 +1,134 @@
#############
Configuration
#############


Kupferbootstrap uses `toml <https://en.wikipedia.org/wiki/TOML>`_ for its configuration file.

The file can either be edited manually or managed via the :doc:`cli/config` subcommand.

You can quickly generate a default config by running :code:`kupferbootstrap config init -N`.


File Location
#############

The configuration is stored in ``~/.config/kupfer/kupferbootstrap.toml``, where ``~`` is your user's home folder.

Kupferbootstrap needs to create a number of folders, e.g. to download ``PKGBUILDs.git`` and store binary packages.
By default, all of those folders live inside ``~/.cache/kupfer/``.

See also the ``[paths]`` section in your config.

Sections
########

A config file is split into sections like so:

.. code-block:: toml

   [pkgbuilds]
   git_repo = "https://gitlab.com/kupfer/packages/pkgbuilds.git"
   git_branch = "dev"

   [pacman]
   parallel_downloads = 3


Here, we have two sections: ``pkgbuilds`` and ``pacman``.

Flavours
########

Flavours are preset collections of software and functionality to enable,
i.e. desktop environments like `Gnome <https://en.wikipedia.org/wiki/GNOME>`_
and `Phosh <https://en.wikipedia.org/wiki/Phosh>`_.


Profiles
########

The last section, and currently the only one with subsections, is the ``profiles`` section.

A profile is the configuration of a specific device image. It specifies (amongst others):

* the device model
* the flavour (desktop environment)
* the host- and user name
* extra packages to install

Using a profile's ``parent`` key,
you can inherit settings from another profile.

This allows you to easily keep a number of slight variations of the same target profile around
without the need to constantly modify your Kupferbootstrap configuration file.

You can easily create new profiles with
`kupferbootstrap config profile init <../cli/config/#kupferbootstrap-config-profile-init>`_.

Here's an example:

.. code:: toml

   [profiles]
   current = "graphical"

   [profiles.default]
   parent = ""
   device = "oneplus-enchilada"
   flavour = "barebone"
   pkgs_include = [ "wget", "rsync", "nano", "tmux", "zsh", "pv", ]
   pkgs_exclude = []
   hostname = "kupferphone"
   username = "prawn"
   size_extra_mb = 800

   [profiles.graphical]
   parent = "default"
   flavour = "phosh"
   pkgs_include = [ "firefox", "tilix", "gnome-tweaks" ]
   size_extra_mb = "+3000"

   [profiles.hades]
   parent = "graphical"
   flavour = "phosh"
   hostname = "hades"

   [profiles.recovery]
   parent = "default"
   flavour = "debug-shell"

   [profiles.beryllium]
   parent = "graphical"
   device = "xiaomi-beryllium-ebbg"
   flavour = "gnome"
   hostname = "pocof1"


The ``current`` key in the ``profiles`` section controls which profile gets used by Kupferbootstrap by default.

The first subsection (``profiles.default``) describes the `default` profile,
which gets created by `config init <../cli/config/#kupferbootstrap-config-init>`_.

Next, we have a `graphical` profile that defines a couple of graphical programs for all but the `recovery` profile,
since that doesn't have a GUI.

``size_extra_mb``
-----------------

Note how ``size_extra_mb`` can either be a plain integer (``800``) or a string,
optionally leading with a plus sign (``+3000``),
which instructs Kupferbootstrap to add the value to the parent profile's ``size_extra_mb``.

``pkgs_include`` / ``pkgs_exclude``
-----------------------------------

Like ``size_extra_mb``, ``pkgs_include`` will be merged with the parent profile's ``pkgs_include``.

To exclude unwanted packages from being inherited from a parent profile, use ``pkgs_exclude`` in the child profile.

.. hint::
   ``pkgs_exclude`` has no influence on Pacman's dependency resolution.
   It only blocks packages during the image build that would otherwise be explicitly installed
   due to being listed in a parent profile or the selected flavour.
docs/source/index.rst · Normal file · 16 lines
@@ -0,0 +1,16 @@
#############################
Kupferbootstrap Documentation
#############################

This is the documentation for `Kupferbootstrap <https://gitlab.com/kupfer/kupferbootstrap>`_,
a tool to build and flash packages and images for the `Kupfer <https://gitlab.com/kupfer/>`_ mobile Linux distro.


Documentation pages
===================

.. toctree::

   install
   config
   cli
docs/source/install.rst · Normal file · 35 lines
@@ -0,0 +1,35 @@
############
Installation
############


#.
   Install Python 3, Docker, and git.

   On Arch: ``pacman -S python docker git --needed --noconfirm``

   .. Hint::
      After installing Docker you will have to add your user to the ``docker`` group:

      ``sudo usermod -aG docker "$(whoami)"``

      Then restart your desktop session for the new group to take effect.

#. Pick which Kupferbootstrap branch to clone: usually either ``main`` or ``dev``

#. Clone the repository: ``git clone -b INSERT_BRANCHNAME_HERE https://gitlab.com/kupfer/kupferbootstrap``

#. Change into the folder: ``cd kupferbootstrap``

#.
   Install the Python dependencies: ``pip3 install -r requirements.txt``

   .. Note::
      Most of our Python dependencies are available as distro packages on most distros;
      sadly, the coverage on Arch is incomplete.

      See ``requirements.txt`` for the list of required Python packages.

#. Symlink ``kupferbootstrap`` into your ``$PATH``: ``sudo ln -s "$(pwd)/bin/kupferbootstrap" /usr/local/bin/``

#. You should now be able to run ``kupferbootstrap --help``!
docs/source/static/kupfer-black-transparent.svg · Normal file · 90 lines
(New SVG asset: the Kupfer "Cu / 29 / 63.546" copper-element logo, black on transparent, created with Inkscape; full markup omitted here. Rendered size: 5.1 KiB.)
docs/source/static/kupfer-white-filled.svg · Normal file · 90 lines
(New SVG asset: the same Kupfer logo, white on a filled dark background; full markup omitted here. Rendered size: 5.2 KiB.)
90 docs/source/static/kupfer-white-transparent.svg Normal file
@@ -0,0 +1,90 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->

<svg
   width="190"
   height="190"
   viewBox="0 0 190 190"
   version="1.1"
   id="svg5"
   xmlns:xlink="http://www.w3.org/1999/xlink"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg">
  <defs
     id="defs2">
    <linearGradient
       id="linearGradient2922">
      <stop
         style="stop-color:#000000;stop-opacity:1;"
         offset="0"
         id="stop2918" />
      <stop
         style="stop-color:#000000;stop-opacity:0;"
         offset="1"
         id="stop2920" />
    </linearGradient>
    <rect
       x="13.627879"
       y="59.548416"
       width="111.21325"
       height="97.633041"
       id="rect5030" />
    <linearGradient
       xlink:href="#linearGradient2922"
       id="linearGradient2924"
       x1="90.118146"
       y1="164.56091"
       x2="170.81263"
       y2="164.56091"
       gradientUnits="userSpaceOnUse" />
  </defs>
  <g
     id="layer2"
     style="display:none">
    <rect
       style="fill:#343a40;fill-opacity:1;fill-rule:evenodd;stroke:none;stroke-width:2.04836;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
       id="rect53-7"
       width="184.064"
       height="184.064"
       x="3.0180202"
       y="3.0180202"
       ry="15.325292" />
  </g>
  <g
     id="layer1">
    <path
       style="color:#000000;fill:#ffffff;fill-rule:evenodd;stroke-linejoin:round;-inkscape-stroke:hairline;fill-opacity:1;stroke:#000000;stroke-opacity:1;stroke-width:0.000001;vector-effect:non-scaling-stroke;stroke-miterlimit:4;stroke-dasharray:none"
       d="M 19.966797,4 C 11.138816,4 4,11.138816 4,19.966797 V 169.78516 c 0,8.82798 7.138816,15.96679 15.966797,15.96679 H 169.73242 c 8.82798,0 15.9668,-7.13881 15.9668,-15.96679 V 19.966797 C 185.69922,11.138816 178.5604,4 169.73242,4 Z m 0,2 H 169.73242 c 7.75458,0 13.9668,6.21222 13.9668,13.966797 V 169.78516 c 0,7.75457 -6.21222,13.96679 -13.9668,13.96679 H 19.966797 C 12.21222,183.75195 6,177.53973 6,169.78516 V 19.966797 C 6,12.21222 12.21222,6 19.966797,6 Z"
       id="rect53" />
    <text
       xml:space="preserve"
       id="text5028"
       style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:55.9664px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Bold';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;white-space:pre;shape-inside:url(#rect5030);fill:#ffffff;fill-opacity:1;stroke:none;stroke-opacity:1;stroke-width:0;stroke-miterlimit:4;stroke-dasharray:none;vector-effect:non-scaling-stroke;-inkscape-stroke:hairline"
       transform="matrix(1.7767576,0,0,1.5652748,1.1199194,-51.120758)"><tspan
         x="13.626953"
         y="111.31775"
         id="tspan42"><tspan
           style="vector-effect:non-scaling-stroke"
           id="tspan40">Cu</tspan></tspan></text>
    <text
       xml:space="preserve"
       style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:26.6667px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke:none;stroke-opacity:1;stroke-width:0;stroke-miterlimit:4;stroke-dasharray:none;paint-order:normal;vector-effect:non-scaling-stroke;-inkscape-stroke:hairline"
       x="15.241241"
       y="34.91935"
       id="text66922"><tspan
         id="tspan66920"
         x="15.241241"
         y="34.91935"
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:26.6667px;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke:none;stroke-opacity:1;stroke-width:0;stroke-miterlimit:4;stroke-dasharray:none;vector-effect:non-scaling-stroke;-inkscape-stroke:hairline">29</tspan></text>
    <text
       xml:space="preserve"
       style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:26.6667px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal"
       x="91.402611"
       y="168.75438"
       id="text66922-3"><tspan
         id="tspan66920-6"
         x="91.402611"
         y="168.75438"
         style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:26.6667px;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-variant-east-asian:normal;fill:#ffffff;fill-opacity:1;stroke:none;stroke-width:0;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1">63.546</tspan></text>
  </g>
</svg>
After Width: | Height: | Size: 5.1 KiB |
3 docs/source/static/kupfer_docs.css Normal file
@@ -0,0 +1,3 @@
.sidebar-brand-text {
    text-align: center;
}
5 docs/source/templates/command.rst Normal file
@@ -0,0 +1,5 @@
.. title: {{fullname}}

.. click:: {% if fullname == 'main' %}main:cli{% else %}{{fullname}}:cmd_{{fullname}}{% endif %}
   :prog: kupferbootstrap {{fullname}}
   :nested: full
0 exec/__init__.py Normal file
121 exec/cmd.py Normal file
@@ -0,0 +1,121 @@
import logging
import os
import pwd
import subprocess

from shlex import quote as shell_quote
from typing import Optional, Union, TypeAlias

ElevationMethod: TypeAlias = str

# as long as **only** sudo is supported, hardcode the default into ELEVATION_METHOD_DEFAULT.
# when other methods are added, all mentions of ELEVATION_METHOD_DEFAULT should be replaced by a config key.

ELEVATION_METHOD_DEFAULT = "sudo"

ELEVATION_METHODS: dict[ElevationMethod, list[str]] = {
    "sudo": ['sudo', '--'],
}


def generate_env_cmd(env: dict[str, str]):
    return ['/usr/bin/env'] + [f'{key}={value}' for key, value in env.items()]


def flatten_shell_script(script: Union[list[str], str], shell_quote_items: bool = False, wrap_in_shell_quote=False) -> str:
    """
    takes a shell-script and returns a flattened string for consumption with `sh -c`.

    `shell_quote_items` should only be used on `script` arrays that have no shell magic anymore,
    e.g. `['bash', '-c', 'echo $USER']`, which would return the string `'bash' '-c' 'echo $USER'`,
    which is suited for consumption by another bash -c process.
    """
    if not isinstance(script, str) and isinstance(script, list):
        cmds = script
        if shell_quote_items:
            cmds = [shell_quote(i) for i in cmds]
        script = " ".join(cmds)
    if wrap_in_shell_quote:
        script = shell_quote(script)
    return script


def wrap_in_bash(cmd: Union[list[str], str], flatten_result=True) -> Union[str, list[str]]:
    res: Union[str, list[str]] = ['/bin/bash', '-c', flatten_shell_script(cmd, shell_quote_items=False, wrap_in_shell_quote=False)]
    if flatten_result:
        res = flatten_shell_script(res, shell_quote_items=True, wrap_in_shell_quote=False)
    return res


def generate_cmd_elevated(cmd: list[str], elevation_method: ElevationMethod):
    "wraps `cmd` in the necessary commands to escalate, e.g. `['sudo', '--', cmd]`."
    if elevation_method not in ELEVATION_METHODS:
        raise Exception(f"Unknown elevation method {elevation_method}")
    return ELEVATION_METHODS[elevation_method] + cmd


def generate_cmd_su(
    cmd: list[str],
    switch_user: str,
    elevation_method: Optional[ElevationMethod] = None,
    force_su: bool = False,
    force_elevate: bool = False,
):
    """
    returns cmd to escalate (e.g. sudo) and switch users (su) to run `cmd` as `switch_user` as necessary.
    If `switch_user` is neither the current user nor root, cmd will have to be flattened into a single string.
    A result might look like `['sudo', '--', 'su', '-s', '/bin/bash', '-c', cmd_as_a_string]`.
    """
    current_uid = os.getuid()
    if pwd.getpwuid(current_uid).pw_name != switch_user or force_su:
        if switch_user != 'root' or force_su:
            cmd = ['/bin/su', switch_user, '-s', '/bin/bash', '-c', flatten_shell_script(cmd, shell_quote_items=True)]
        if current_uid != 0 or force_elevate:  # in order to use `/bin/su`, we have to be root first.
            cmd = generate_cmd_elevated(cmd, elevation_method or ELEVATION_METHOD_DEFAULT)

    return cmd
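
# Illustrative sketch (added note, not part of the original file): assuming a
# non-root user "kupfer" and the default sudo elevation method,
#   generate_cmd_su(['whoami'], switch_user='nobody')
# would yield roughly
#   ['sudo', '--', '/bin/su', 'nobody', '-s', '/bin/bash', '-c', "'whoami'"]
# while passing the current user as switch_user returns cmd unchanged.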


def run_cmd(
    script: Union[str, list[str]],
    env: dict[str, str] = {},
    attach_tty: bool = False,
    capture_output: bool = False,
    cwd: Optional[str] = None,
    switch_user: Optional[str] = None,
    elevation_method: Optional[ElevationMethod] = None,
    stdout: Optional[int] = None,
    stderr=None,
) -> Union[subprocess.CompletedProcess, int]:
    "execute `script` as `switch_user`, elevating and su'ing as necessary"
    kwargs: dict = {}
    env_cmd = []
    if env:
        env_cmd = generate_env_cmd(env)
        kwargs['env'] = env
    if not attach_tty:
        kwargs |= {'stdout': stdout} if stdout else {'capture_output': capture_output}
        if stderr:
            kwargs['stderr'] = stderr

    script = flatten_shell_script(script)
    if cwd:
        kwargs['cwd'] = cwd
    wrapped_script: list[str] = wrap_in_bash(script, flatten_result=False)  # type: ignore
    cmd = env_cmd + wrapped_script
    if switch_user:
        cmd = generate_cmd_su(cmd, switch_user, elevation_method=elevation_method)
    logging.debug(f'Running cmd: "{cmd}"')
    if attach_tty:
        return subprocess.call(cmd, **kwargs)
    else:
        return subprocess.run(cmd, **kwargs)


def run_root_cmd(*kargs, **kwargs):
    kwargs['switch_user'] = 'root'
    return run_cmd(*kargs, **kwargs)


def elevation_noop(**kwargs):
    run_root_cmd('/bin/true', **kwargs)
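
# Usage sketch (added note, not part of the original file):
#   run_cmd('echo "$USER"', capture_output=True)     # script is wrapped in /bin/bash -c
#   run_root_cmd(['mkdir', '-p', '/var/cache/foo'])  # elevated via sudo unless already root
# '/var/cache/foo' is only an illustrative path.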
171 exec/file.py Normal file
@@ -0,0 +1,171 @@
import atexit
import logging
import os
import stat
import subprocess

from shutil import rmtree
from tempfile import mkdtemp
from typing import Optional, Union

from .cmd import run_root_cmd, elevation_noop, generate_cmd_su, wrap_in_bash, shell_quote
from utils import get_user_name, get_group_name


def try_native_filewrite(path: str, content: Union[str, bytes], chmod: Optional[str] = None) -> Optional[Exception]:
    "try writing with python open(), return None on success, return(!) Exception on failure"
    bflag = 'b' if isinstance(content, bytes) else ''
    try:
        kwargs = {}
        if chmod:
            kwargs['mode'] = chmod
        descriptor = os.open(path, **kwargs)  # type: ignore
        with open(descriptor, 'w' + bflag) as f:
            f.write(content)
    except Exception as ex:
        return ex
    return None


def chown(path: str, user: Optional[Union[str, int]] = None, group: Optional[Union[str, int]] = None, recursive: bool = False):
    owner = ''
    if user is not None:
        owner += get_user_name(user)
    if group is not None:
        owner += f':{get_group_name(group)}'
    if owner:
        cmd = ["chown"] + (['-R'] if recursive else [])
        result = run_root_cmd(cmd + [owner, path])
        assert isinstance(result, subprocess.CompletedProcess)
        if result.returncode:
            raise Exception(f"Failed to change owner of '{path}' to '{owner}'")


def chmod(path, mode: Union[int, str] = 0o0755, force_sticky=True):
    if not isinstance(mode, str):
        octal = oct(mode)[2:]
    else:
        octal = mode
        assert octal.isnumeric()
    octal = octal.rjust(3, '0')
    if force_sticky:
        octal = octal.rjust(4, '0')
    try:
        os.chmod(path, mode=octal)  # type: ignore
    except:
        cmd = ["chmod", octal, path]
        result = run_root_cmd(cmd)
        assert isinstance(result, subprocess.CompletedProcess)
        if result.returncode:
            raise Exception(f"Failed to set mode of '{path}' to '{octal}'")


def root_check_exists(path):
    return os.path.exists(path) or run_root_cmd(['[', '-e', path, ']']).returncode == 0


def root_check_is_dir(path):
    return os.path.isdir(path) or run_root_cmd(['[', '-d', path, ']'])


def write_file(
    path: str,
    content: Union[str, bytes],
    lazy: bool = True,
    mode: Optional[str] = None,
    user: Optional[str] = None,
    group: Optional[str] = None,
):
    chmod_mode = ''
    chown_user = get_user_name(user) if user else None
    chown_group = get_group_name(group) if group else None
    fstat: os.stat_result
    exists = root_check_exists(path)
    dirname = os.path.dirname(path)
    if exists:
        fstat = os.stat(path)
    else:
        chown_user = chown_user or get_user_name(os.getuid())
        chown_group = chown_group or get_group_name(os.getgid())
        dir_exists = root_check_exists(dirname)
        if not dir_exists or not root_check_is_dir(dirname):
            reason = "is not a directory" if dir_exists else "does not exist"
            raise Exception(f"Error writing file {path}, parent dir {reason}")
    if mode:
        if not mode.isnumeric():
            raise Exception(f"Unknown file mode '{mode}' (must be numeric): {path}")
        if not exists or stat.filemode(int(mode, 8)) != stat.filemode(fstat.st_mode):
            chmod_mode = mode
    failed = try_native_filewrite(path, content, chmod_mode)
    if exists or failed:
        if failed:
            try:
                elevation_noop(attach_tty=True)  # avoid password prompt while writing file
                logging.debug(f"Writing to {path} using elevated /bin/tee")
                cmd: list[str] = generate_cmd_su(wrap_in_bash(f'tee {shell_quote(path)} >/dev/null', flatten_result=False), 'root')  # type: ignore
                assert isinstance(cmd, list)
                s = subprocess.Popen(
                    cmd,
                    text=(not isinstance(content, bytes)),
                    stdin=subprocess.PIPE,
                )
                s.communicate(content)
                s.wait(300)  # 5 minute timeout
                if s.returncode:
                    raise Exception(f"Write command exited non-zero: {s.returncode}")
            except Exception as ex:
                logging.fatal(f"Writing to file '{path}' with elevated privileges failed")
                raise ex
        if chmod_mode:
            chmod(path, chmod_mode)

        chown(path, chown_user, chown_group)
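
# Flow sketch (added note, not part of the original file): write_file() first
# attempts a plain native write; only when that fails (e.g. EACCES on a
# root-owned path) does it pipe the content through an elevated
# `su root -c 'tee <path>'`, and it re-applies mode and ownership afterwards.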


def root_write_file(*args, **kwargs):
    kwargs['user'] = 'root'
    kwargs['group'] = 'root'
    return write_file(*args, **kwargs)


def remove_file(path: str, recursive=False):
    try:
        rm = rmtree if recursive else os.unlink
        rm(path)  # type: ignore
    except:
        cmd = ['rm'] + (['-r'] if recursive else []) + [path]
        rc = run_root_cmd(cmd).returncode
        if rc:
            raise Exception(f"Unable to remove {path}: cmd returned {rc}")


def makedir(path, user: Optional[str] = None, group: Optional[str] = None, parents: bool = True):
    if not root_check_exists(path):
        try:
            if parents:
                os.makedirs(path, exist_ok=True)
            else:
                os.mkdir(path)
        except:
            run_root_cmd(['mkdir'] + (['-p'] if parents else []) + [path])
    chown(path, user, group)


def root_makedir(path, parents: bool = True):
    return makedir(path, user='root', group='root', parents=parents)


def symlink(source, target):
    try:
        os.symlink(source, target)
    except:
        run_root_cmd(['ln', '-s', source, target])


def get_temp_dir(register_cleanup=True, mode: int = 0o0755):
    "create a new tempdir and sanitize ownership so root can access user files as god intended"
    t = mkdtemp()
    chmod(t, mode)
    if register_cleanup:
        atexit.register(remove_file, t, recursive=True)
    return t
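
# Usage sketch (added note, not part of the original file):
#   d = get_temp_dir()  # 0o755 tempdir, removed again at interpreter exit
#   write_file(os.path.join(d, 'hosts'), '127.0.0.1 localhost\n', user='root', mode='644')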
70 exec/test_cmd.py Normal file
@@ -0,0 +1,70 @@
import logging
import os
import pwd
import subprocess

from .cmd import run_cmd, run_root_cmd, generate_cmd_su


def get_username(id: int):
    return pwd.getpwuid(id).pw_name


def run_func(f, expected_user: str = None, **kwargs):
    current_uid = os.getuid()
    current_username = get_username(current_uid)
    target_uid = current_uid
    result = f(['id', '-u'], capture_output=True, **kwargs)
    assert isinstance(result, subprocess.CompletedProcess)
    result.check_returncode()
    if expected_user and current_username != expected_user:
        target_uid = pwd.getpwnam(expected_user).pw_uid
    result_uid = result.stdout.decode()
    assert int(result_uid) == target_uid


def run_generate_and_exec(script, generate_args={}, switch_user=None, **kwargs):
    "runs generate_cmd_su() and executes the resulting argv"
    if not switch_user:
        switch_user = get_username(os.getuid())
    cmd = generate_cmd_su(script, switch_user=switch_user, **generate_args)
    logging.debug(f'run_generate_and_exec: running {cmd}')
    return subprocess.run(
        cmd,
        **kwargs,
    )


def test_generate_su_force_su():
    run_func(run_generate_and_exec, generate_args={'force_su': True})


def test_generate_su_force_elevate():
    run_func(run_generate_and_exec, generate_args={'force_elevate': True}, expected_user='root', switch_user='root')


def test_generate_su_nobody_force_su():
    user = 'nobody'
    run_func(run_generate_and_exec, expected_user=user, switch_user=user, generate_args={'force_su': True})


def test_generate_su_nobody_force_su_and_elevate():
    user = 'nobody'
    run_func(run_generate_and_exec, expected_user=user, switch_user=user, generate_args={'force_su': True, 'force_elevate': True})


def test_run_cmd():
    run_func(run_cmd)


def test_run_cmd_su_nobody():
    user = 'nobody'
    run_func(run_cmd, expected_user=user, switch_user=user)


def test_run_cmd_as_root():
    run_func(run_cmd, expected_user='root', switch_user='root')


def test_run_root_cmd():
    run_func(run_root_cmd, expected_user='root')
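
# Note (added, not part of the original file): these tests shell out to real
# su/sudo, so they need passwordless elevation and an existing 'nobody' user,
# e.g.: pytest exec/test_cmd.py -k force_su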
181 exec/test_file.py Normal file
@@ -0,0 +1,181 @@
import pytest

import os
import stat

from typing import Union, Generator
from dataclasses import dataclass

from .cmd import run_root_cmd
from .file import chmod, chown, get_temp_dir, write_file
from utils import get_gid, get_uid

TEMPDIR_MODE = 0o755


@dataclass
class TempdirFillInfo():
    path: str
    files: dict[str, str]


def _get_tempdir():
    d = get_temp_dir(register_cleanup=False, mode=TEMPDIR_MODE)
    assert os.path.exists(d)
    return d


def remove_dir(d):
    run_root_cmd(['rm', '-rf', d]).check_returncode()


def create_file(filepath, owner='root', group='root'):
    assert not os.path.exists(filepath)
    run_root_cmd(['touch', filepath]).check_returncode()
    run_root_cmd(['chown', f'{owner}:{group}', filepath]).check_returncode()


@pytest.fixture
def tempdir():
    d = _get_tempdir()
    yield d
    # cleanup, gets run after the test since we yield above
    remove_dir(d)


def test_get_tempdir(tempdir):
    mode = os.stat(tempdir).st_mode
    assert stat.S_ISDIR(mode)
    assert stat.S_IMODE(mode) == TEMPDIR_MODE


@pytest.fixture
def tempdir_filled() -> Generator[TempdirFillInfo, None, None]:
    d = _get_tempdir()
    contents = {
        'rootfile': {
            'owner': 'root',
            'group': 'root',
        },
        'userfile': {
            'owner': 'nobody',
            'group': 'nobody',
        },
    }
    res = TempdirFillInfo(path=d, files={})
    for p, opts in contents.items():
        path = os.path.join(d, p)
        res.files[p] = path
        create_file(path, **opts)
    yield res
    # cleanup, gets run after the test since we yield above
    remove_dir(d)


def verify_ownership(filepath, user: Union[str, int], group: Union[str, int]):
    uid = get_uid(user)
    gid = get_gid(group)
    assert os.path.exists(filepath)
    fstat = os.stat(filepath)
    assert fstat.st_uid == uid
    assert fstat.st_gid == gid


def verify_mode(filepath, mode: int = TEMPDIR_MODE):
    assert stat.S_IMODE(os.stat(filepath).st_mode) == mode


def verify_content(filepath, content):
    assert os.path.exists(filepath)
    with open(filepath, 'r') as f:
        assert f.read().strip() == content.strip()


@pytest.mark.parametrize("user,group", [('root', 'root'), ('nobody', 'nobody')])
def test_chown(tempdir: str, user: str, group: str):
    assert os.path.exists(tempdir)
    target_uid = get_uid(user)
    target_gid = get_gid(group)
    chown(tempdir, target_uid, target_gid)
    verify_ownership(tempdir, target_uid, target_gid)


@pytest.mark.parametrize("mode", [0, 0o700, 0o755, 0o600, 0o555])
def test_chmod(tempdir_filled, mode: int):
    for filepath in tempdir_filled.files.values():
        chmod(filepath, mode)
        verify_mode(filepath, mode)


def test_tempdir_filled_fixture(tempdir_filled: TempdirFillInfo):
    files = tempdir_filled.files
    assert files
    assert 'rootfile' in files
    assert 'userfile' in files
    verify_ownership(files['rootfile'], 'root', 'root')
    verify_ownership(files['userfile'], 'nobody', 'nobody')


def test_write_new_file_naive(tempdir: str):
    assert os.path.exists(tempdir)
    new = os.path.join(tempdir, 'newfiletest')
    content = 'test12345'
    assert not os.path.exists(new)
    write_file(new, content)
    verify_content(new, content)
    verify_ownership(new, user=os.getuid(), group=os.getgid())


def test_write_new_file_root(tempdir: str):
    assert os.path.exists(tempdir)
    new = os.path.join(tempdir, 'newfiletest')
    content = 'test12345'
    assert not os.path.exists(new)
    write_file(new, content, user='root', group='root')
    verify_content(new, content)
    verify_ownership(new, user=0, group=0)


def test_write_new_file_user(tempdir: str):
    user = 'nobody'
    group = 'nobody'
    assert os.path.exists(tempdir)
    new = os.path.join(tempdir, 'newfiletest')
    content = 'test12345'
    assert not os.path.exists(new)
    write_file(new, content, user=user, group=group)
    assert os.path.exists(new)
    verify_content(new, content)
    verify_ownership(new, user=user, group=group)


def test_write_new_file_user_in_root_dir(tempdir: str):
    assert os.path.exists(tempdir)
    chown(tempdir, user='root', group='root')
    verify_ownership(tempdir, 'root', 'root')
    test_write_new_file_user(tempdir)


def test_write_rootfile_naive(tempdir_filled: TempdirFillInfo):
    files = tempdir_filled.files
    assert 'rootfile' in files
    p = files['rootfile']
    assert os.path.exists(p)
    verify_ownership(p, 'root', 'root')
    content = 'test123'
    write_file(p, content)
    verify_content(p, 'test123')
    verify_ownership(p, 'root', 'root')


@pytest.mark.parametrize("user,group", [('root', 'root'), ('nobody', 'nobody')])
def test_write_rootfile(tempdir_filled: TempdirFillInfo, user: str, group: str):
    files = tempdir_filled.files
    assert 'rootfile' in files
    p = files['rootfile']
    assert os.path.exists(p)
    verify_ownership(p, 'root', 'root')
    content = 'test123'
    write_file(p, content)
    verify_content(p, 'test123')
    verify_ownership(p, 'root', 'root')
22 flash.py
@@ -1,13 +1,12 @@
import atexit
import shutil
import os
import subprocess
import click
import tempfile

from constants import FLASH_PARTS, LOCATIONS
from exec.cmd import run_root_cmd
from exec.file import get_temp_dir
from fastboot import fastboot_flash
from image import dd_image, partprobe, shrink_fs, losetup_rootfs_image, dump_aboot, dump_lk2nd, dump_qhypstub, get_device_and_flavour, get_image_name, get_image_path
from image import dd_image, partprobe, shrink_fs, losetup_rootfs_image, losetup_destroy, dump_aboot, dump_lk2nd, dump_qhypstub, get_device_and_flavour, get_image_name, get_image_path
from wrapper import enforce_wrap

ABOOT = FLASH_PARTS['ABOOT']
@@ -49,29 +48,24 @@ def cmd_flash(what: str, location: str):
        if f'jumpdrive{location.split("-")[0]}' in sanitized_file:
            path = os.path.realpath(os.path.join(dir, file))
            partprobe(path)
            result = subprocess.run(['lsblk', path, '-o', 'SIZE'], capture_output=True)
            result = run_root_cmd(['lsblk', path, '-o', 'SIZE'], capture_output=True)
            if result.returncode != 0:
                raise Exception(f'Failed to lsblk {path}')
            if result.stdout == b'SIZE\n 0B\n':
                raise Exception(
                    f'Disk {path} has a size of 0B. That probably means it is not available (e.g. no microSD inserted or no microSD card slot installed in the device) or corrupt or defective'
                )
                raise Exception(f'Disk {path} has a size of 0B. That probably means it is not available (e.g. no '
                                'microSD inserted or no microSD card slot installed in the device) or corrupt or defective')
    if path == '':
        raise Exception('Unable to discover Jumpdrive')

    minimal_image_dir = tempfile.gettempdir()
    minimal_image_dir = get_temp_dir(register_cleanup=True)
    minimal_image_path = os.path.join(minimal_image_dir, f'minimal-{device_image_name}')

    def clean_dir():
        shutil.rmtree(minimal_image_dir)

    atexit.register(clean_dir)

    shutil.copyfile(device_image_path, minimal_image_path)

    loop_device = losetup_rootfs_image(minimal_image_path, sector_size)
    partprobe(loop_device)
    shrink_fs(loop_device, minimal_image_path, sector_size)
    losetup_destroy(loop_device)

    result = dd_image(input=minimal_image_path, output=path)
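
# Flow note (added, not part of the original file): the image is first copied
# into a fresh tempdir, shrunk in place through a loop device, detached via
# losetup_destroy(), and only then dd'ed onto the target disk.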
14 format.sh
@@ -4,16 +4,22 @@ yapf_args=('--recursive' '--parallel')
autoflake_args=('--recursive' '--remove-unused-variables' '--remove-all-unused-imports' '--expand-star-imports' '--remove-duplicate-keys')

format() {
    yapf "${yapf_args[@]}" .
    autoflake "${autoflake_args[@]}" .
    files=("$@")
    if [[ -z "${files[*]}" ]]; then
        files=(".")
    fi

    yapf "${yapf_args[@]}" "${files[@]}"
    autoflake "${autoflake_args[@]}" "${files[@]}"
}


if [[ "$1" == "--check" ]]; then
    yapf_args+=('--diff')
    [[ "$(format | tee /dev/stderr | wc -c)" == "0" ]]
    shift
    [[ "$(format "$@" | tee /dev/stderr | wc -c)" == "0" ]]
else
    yapf_args+=('--in-place')
    autoflake_args+=('--in-place')
    format
    format "$@"
fi
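
# Usage sketch (added note, not part of the original script): pass paths to
# limit the scope, e.g.:
#   ./format.sh --check exec/cmd.py exec/file.py
#   ./format.sh image.py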
forwarding.py
@@ -1,6 +1,7 @@
import click
import subprocess
from logger import logging
import logging

from exec.cmd import run_root_cmd
from ssh import run_ssh_command
from wrapper import check_programs_wrap
@@ -10,25 +11,26 @@ def cmd_forwarding():
    """Enable network forwarding for a usb-attached device"""
    check_programs_wrap(['sysctl', 'iptables'])

    result = subprocess.run([
    logging.info("Enabling ipv4 forwarding with sysctl")
    result = run_root_cmd([
        'sysctl',
        'net.ipv4.ip_forward=1',
    ])
    if result.returncode != 0:
        logging.fatal(f'Failed to enable ipv4 forward via sysctl')
        exit(1)
        raise click.Abort('Failed to enable ipv4 forward via sysctl')

    result = subprocess.run([
    logging.info("Enabling ipv4 forwarding with iptables")
    result = run_root_cmd([
        'iptables',
        '-P',
        'FORWARD',
        'ACCEPT',
    ])
    if result.returncode != 0:
        logging.fatal(f'Failed to set iptables rule')
        exit(1)
        raise click.Abort('Failed to set iptables rule')

    result = subprocess.run([
    logging.info("Enabling ipv4 NATting with iptables")
    result = run_root_cmd([
        'iptables',
        '-A',
        'POSTROUTING',
@@ -40,10 +42,9 @@ def cmd_forwarding():
        '172.16.42.0/24',
    ])
    if result.returncode != 0:
        logging.fatal(f'Failed to set iptables rule')
        exit(1)
        raise click.Abort('Failed to set iptables rule')

    result = run_ssh_command(cmd=['sudo -S route add default gw 172.16.42.2'])
    logging.info("Setting default route on device via ssh")
    result = run_ssh_command(cmd=['sudo -S route add default gw 172.16.42.2'], alloc_tty=True)
    if result.returncode != 0:
        logging.fatal(f'Failed to add gateway over ssh')
        exit(1)
        raise click.Abort('Failed to add gateway over ssh')
@@ -193,7 +193,9 @@ export LDFLAGS="$LDFLAGS,-L/usr/{hostspec}/lib,-L/{chroot}/usr/lib,-rpath-link,/
def generate_pacman_conf_body(
    arch: Arch,
    check_space: bool = True,
    in_chroot: bool = True,
):
    pacman_cache = config.get_path('pacman') if not in_chroot else CHROOT_PATHS['pacman']
    return f'''
#
# /etc/pacman.conf
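
# Sketch of the new in_chroot switch (added note, not part of the original file):
#   generate_pacman_conf_body(arch, in_chroot=True)   # CacheDir under CHROOT_PATHS['pacman']
#   generate_pacman_conf_body(arch, in_chroot=False)  # CacheDir under config.get_path('pacman')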
@@ -208,7 +210,7 @@ def generate_pacman_conf_body(
# If you wish to use different paths, uncomment and update the paths.
#RootDir = /
#DBPath = /var/lib/pacman/
CacheDir = {CHROOT_PATHS['pacman']}/{arch}
CacheDir = {pacman_cache}/{arch}
#LogFile = /var/log/pacman.log
#GPGDir = /etc/pacman.d/gnupg/
#HookDir = /etc/pacman.d/hooks/
138 image.py
@@ -6,16 +6,19 @@ import subprocess
import click
import logging
from signal import pause
from subprocess import run, CompletedProcess
from subprocess import CompletedProcess
from typing import Optional

from chroot.device import DeviceChroot, get_device_chroot
from constants import Arch, BASE_PACKAGES, DEVICES, FLAVOURS
from config import config, Profile
from distro.distro import get_base_distro, get_kupfer_https
from packages import build_enable_qemu_binfmt, discover_packages, build_packages
from exec.cmd import run_root_cmd, generate_cmd_su
from exec.file import root_write_file, root_makedir, makedir
from packages import build_enable_qemu_binfmt, build_packages_by_paths
from packages.device import get_profile_device
from ssh import copy_ssh_keys
from wrapper import enforce_wrap
from wrapper import wrap_if_foreign_arch

# image files need to be slightly smaller than partitions to fit
IMG_FILE_ROOT_DEFAULT_SIZE = "1800M"
@@ -28,17 +31,16 @@ def dd_image(input: str, output: str, blocksize='1M') -> CompletedProcess:
        f'if={input}',
        f'of={output}',
        f'bs={blocksize}',
        'iflag=direct',
        'oflag=direct',
        'status=progress',
        'conv=sync,noerror',
    ]
    logging.debug(f'running dd cmd: {cmd}')
    return subprocess.run(cmd)
    return run_root_cmd(cmd)


def partprobe(device: str):
    return subprocess.run(['partprobe', device])
    return run_root_cmd(['partprobe', device])


def shrink_fs(loop_device: str, file: str, sector_size: int):
@@ -47,13 +49,13 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
    sectors_blocks_factor = 4096 // sector_size
    partprobe(loop_device)
    logging.debug(f"Checking filesystem at {loop_device}p2")
    result = subprocess.run(['e2fsck', '-fy', f'{loop_device}p2'])
    result = run_root_cmd(['e2fsck', '-fy', f'{loop_device}p2'])
    if result.returncode > 2:
        # https://man7.org/linux/man-pages/man8/e2fsck.8.html#EXIT_CODE
        raise Exception(f'Failed to e2fsck {loop_device}p2 with exit code {result.returncode}')

    logging.debug(f'Shrinking filesystem at {loop_device}p2')
    result = subprocess.run(['resize2fs', '-M', f'{loop_device}p2'], capture_output=True)
    result = run_root_cmd(['resize2fs', '-M', f'{loop_device}p2'], capture_output=True)
    if result.returncode != 0:
        print(result.stdout)
        print(result.stderr)
@@ -65,7 +67,7 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):

    logging.debug(f'Shrinking partition at {loop_device}p2 to {sectors} sectors')
    child_process = subprocess.Popen(
        ['fdisk', '-b', str(sector_size), loop_device],
        generate_cmd_su(['fdisk', '-b', str(sector_size), loop_device], switch_user='root'),  # type: ignore
        stdin=subprocess.PIPE,
    )
    child_process.stdin.write('\n'.join([  # type: ignore
@@ -85,14 +87,14 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
    returncode = child_process.wait()
    if returncode == 1:
        # For some reason re-reading the partition table fails, but that is not a problem
        subprocess.run(['partprobe'])
        partprobe(loop_device)
    if returncode > 1:
        raise Exception(f'Failed to shrink partition size of {loop_device}p2 with fdisk')

    partprobe(loop_device)

    logging.debug(f'Finding end sector of partition at {loop_device}p2')
    result = subprocess.run(['fdisk', '-b', str(sector_size), '-l', loop_device], capture_output=True)
    result = run_root_cmd(['fdisk', '-b', str(sector_size), '-l', loop_device], capture_output=True)
    if result.returncode != 0:
        print(result.stdout)
        print(result.stderr)
@@ -117,6 +119,18 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
    partprobe(loop_device)


def losetup_destroy(loop_device):
    logging.debug(f'Destroying loop device {loop_device}')
    run_root_cmd(
        [
            'losetup',
            '-d',
            loop_device,
        ],
        stderr=subprocess.DEVNULL,
    )


def get_device_and_flavour(profile_name: Optional[str] = None) -> tuple[str, str]:
    config.enforce_config_loaded()
    profile = config.get_profile(profile_name)
@@ -139,7 +153,7 @@ def get_image_path(device, flavour, img_type='full') -> str:

def losetup_rootfs_image(image_path: str, sector_size: int) -> str:
    logging.debug(f'Creating loop device for {image_path} with sector size {sector_size}')
    result = subprocess.run([
    result = run_root_cmd([
        'losetup',
        '-f',
        '-b',
@@ -148,8 +162,7 @@ def losetup_rootfs_image(image_path: str, sector_size: int) -> str:
        image_path,
    ])
    if result.returncode != 0:
        logging.fatal(f'Failed to create loop device for {image_path}')
        exit(1)
        raise Exception(f'Failed to create loop device for {image_path}')

    logging.debug(f'Finding loop device for {image_path}')
@@ -157,8 +170,7 @@ def losetup_rootfs_image(image_path: str, sector_size: int) -> str:
    if result.returncode != 0:
        print(result.stdout)
        print(result.stderr)
        logging.fatal('Failed to list loop devices')
        exit(1)
        raise Exception('Failed to list loop devices')

    data = json.loads(result.stdout.decode('utf-8'))
    loop_device = ''
@@ -171,18 +183,7 @@ def losetup_rootfs_image(image_path: str, sector_size: int) -> str:
        raise Exception(f'Failed to find loop device for {image_path}')
    partprobe(loop_device)

    def losetup_destroy():
        logging.debug(f'Destroying loop device {loop_device} for {image_path}')
        subprocess.run(
            [
                'losetup',
                '-d',
                loop_device,
            ],
            stderr=subprocess.DEVNULL,
        )

    atexit.register(losetup_destroy)
    atexit.register(losetup_destroy, loop_device)

    return loop_device
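
# Note (added, not part of the original file): atexit.register(fn, *args)
# stores the arguments with the callback, so each loop device gets its own
# cleanup call; this replaces the old nested closure that captured loop_device.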
@@ -193,7 +194,7 @@ def mount_chroot(rootfs_source: str, boot_src: str, chroot: DeviceChroot):
    chroot.mount_rootfs(rootfs_source)
    assert (os.path.ismount(chroot.path))

    os.makedirs(chroot.get_path('boot'), exist_ok=True)
    root_makedir(chroot.get_path('boot'))

    logging.debug(f'Mounting {boot_src} at {chroot.path}/boot')
    chroot.mount(boot_src, '/boot', options=['defaults'])
@@ -208,8 +209,7 @@ def dump_aboot(image_path: str) -> str:
        f'dump /aboot.img {path}',
    ])
    if result.returncode != 0:
        logging.fatal('Failed to dump aboot.img')
        exit(1)
        raise Exception('Failed to dump aboot.img')
    return path


@@ -225,8 +225,7 @@ def dump_lk2nd(image_path: str) -> str:
        f'dump /lk2nd.img {path}',
    ])
    if result.returncode != 0:
        logging.fatal('Failed to dump lk2nd.img')
        exit(1)
        raise Exception('Failed to dump lk2nd.img')
    return path


@@ -239,8 +238,7 @@ def dump_qhypstub(image_path: str) -> str:
        f'dump /qhypstub.bin {path}',
    ])
    if result.returncode != 0:
        logging.fatal('Failed to dump qhypstub.bin')
        exit(1)
        raise Exception('Failed to dump qhypstub.bin')
    return path


@@ -262,7 +260,7 @@ def partition_device(device: str):
    create_boot_partition = ['mkpart', 'primary', 'ext2', '0%', boot_partition_size]
    create_root_partition = ['mkpart', 'primary', boot_partition_size, '100%']
    enable_boot = ['set', '1', 'boot', 'on']
    result = subprocess.run([
    result = run_root_cmd([
        'parted',
        '--script',
        device,
@@ -285,7 +283,7 @@ def create_filesystem(device: str, blocksize: int = 4096, label=None, options=[]
        '-b',
        str(blocksize),
    ] + labels + [device]
    result = subprocess.run(cmd)
    result = run_root_cmd(cmd)
    if result.returncode != 0:
        raise Exception(f'Failed to create {fstype} filesystem on {device} with CMD: {cmd}')

@@ -326,12 +324,16 @@ def install_rootfs(
        user=user,
    )
    files = {
        'etc/pacman.conf': get_base_distro(arch).get_pacman_conf(check_space=True, extra_repos=get_kupfer_https(arch).repos),
        'etc/pacman.conf': get_base_distro(arch).get_pacman_conf(
            check_space=True,
            extra_repos=get_kupfer_https(arch).repos,
            in_chroot=True,
        ),
        'etc/sudoers.d/wheel': "# allow members of group wheel to execute any command\n%wheel ALL=(ALL:ALL) ALL\n",
        'etc/hostname': profile['hostname'],
    }
    for target, content in files.items():
        with open(os.path.join(chroot.path, target.lstrip('/')), 'w') as file:
            file.write(content)
        root_write_file(os.path.join(chroot.path, target.lstrip('/')), content)
    if post_cmds:
        result = chroot.run_cmd(' && '.join(post_cmds))
        assert isinstance(result, subprocess.CompletedProcess)
@@ -344,7 +346,8 @@ def install_rootfs(
    chroot.deactivate()

    logging.debug(f'Unmounting rootfs at "{chroot.path}"')
    res = run(['umount', chroot.path])
    res = run_root_cmd(['umount', chroot.path])
    assert isinstance(res, CompletedProcess)
    logging.debug(f'rc: {res.returncode}')


@@ -355,19 +358,42 @@ def cmd_image():

@cmd_image.command(name='build')
@click.argument('profile_name', required=False)
@click.option('--local-repos/--no-local-repos', '-l/-L', default=True, help='Whether to use local packages. Defaults to true.')
@click.option('--build-pkgs/--no-build-pkgs', '-p/-P', default=True, help='Whether to build missing/outdated local packages. Defaults to true.')
@click.option('--block-target', default=None, help='Override the block device file to target')
@click.option('--skip-part-images', default=False, help='Skip creating image files for the partitions and directly work on the target block device.')
def cmd_build(profile_name: str = None, local_repos: bool = True, build_pkgs: bool = True, block_target: str = None, skip_part_images: bool = False):
    """Build a device image"""
    enforce_wrap()
@click.option('--local-repos/--no-local-repos',
              '-l/-L',
              default=True,
              show_default=True,
              help='Whether to use local package repos at all or only use HTTPS repos.')
@click.option('--build-pkgs/--no-build-pkgs',
              '-p/-P',
              default=True,
              show_default=True,
              help='Whether to build missing/outdated local packages if local repos are enabled.')
@click.option('--no-download-pkgs',
              is_flag=True,
              default=False,
              help='Disable trying to download packages instead of building if building is enabled.')
@click.option('--block-target', type=click.Path(), default=None, help='Override the block device file to write the final image to')
@click.option('--skip-part-images',
              is_flag=True,
              default=False,
              help='Skip creating image files for the partitions and directly work on the target block device.')
def cmd_build(profile_name: str = None,
              local_repos: bool = True,
              build_pkgs: bool = True,
              no_download_pkgs=False,
              block_target: str = None,
              skip_part_images: bool = False):
    """
    Build a device image.

    Unless overridden, required packages will be built or preferably downloaded from HTTPS repos.
    """
    arch = get_profile_device(profile_name).arch
    wrap_if_foreign_arch(arch)
    profile: Profile = config.get_profile(profile_name)
    device, flavour = get_device_and_flavour(profile_name)
    size_extra_mb: int = int(profile["size_extra_mb"])

    # TODO: PARSE DEVICE ARCH AND SECTOR SIZE
    arch = 'aarch64'
    sector_size = 4096
    rootfs_size_mb = FLAVOURS[flavour].get('size', 2) * 1000

@@ -378,12 +404,11 @@ def cmd_build(profile_name: str = None, local_repos: bool = True, build_pkgs: bo

    if local_repos and build_pkgs:
        logging.info("Making sure all packages are built")
        repo = discover_packages()
        build_packages(repo, [p for name, p in repo.items() if name in packages], arch)
        build_packages_by_paths(packages, arch, try_download=not no_download_pkgs)

    image_path = block_target or get_image_path(device, flavour)

    os.makedirs(os.path.dirname(image_path), exist_ok=True)
    makedir(os.path.dirname(image_path))

    logging.info(f'Creating new file at {image_path}')
    create_img_file(image_path, f"{rootfs_size_mb + size_extra_mb}M")
@@ -431,13 +456,12 @@ def cmd_build(profile_name: str = None, local_repos: bool = True, build_pkgs: bo

@cmd_image.command(name='inspect')
@click.option('--shell', '-s', is_flag=True)
@click.argument('profile')
@click.argument('profile', required=False)
def cmd_inspect(profile: str = None, shell: bool = False):
    """Open a shell in a device image"""
    enforce_wrap()
    arch = get_profile_device(profile).arch
    wrap_if_foreign_arch(arch)
    device, flavour = get_device_and_flavour(profile)
    # TODO: get arch from profile
    arch = 'aarch64'
    # TODO: PARSE DEVICE SECTOR SIZE
    sector_size = 4096
    chroot = get_device_chroot(device, flavour, arch)
logger.py
@@ -4,7 +4,7 @@ import logging
import sys


def setup_logging(verbose: bool):
def setup_logging(verbose: bool, log_setup: bool = True):
    level_colors = coloredlogs.DEFAULT_LEVEL_STYLES | {'info': {'color': 'magenta', 'bright': True}, 'debug': {'color': 'blue', 'bright': True}}
    field_colors = coloredlogs.DEFAULT_FIELD_STYLES | {'asctime': {'color': 'white', 'faint': True}}
    level = logging.DEBUG if verbose else logging.INFO
@@ -16,7 +16,8 @@ def setup_logging(verbose: bool):
        level_styles=level_colors,
        field_styles=field_colors,
    )
    logging.debug('Logging set up.')
    if log_setup:
        logging.debug('Logging set up.')


verbose_option = click.option(
12 main.py
@@ -1,11 +1,13 @@
#!/usr/bin/env python3

import click
from traceback import format_exc as get_trace
import subprocess

from traceback import format_exc as get_trace
from typing import Optional

from logger import logging, setup_logging, verbose_option
from wrapper import nowrapper_option
from wrapper import nowrapper_option, enforce_wrap
from config import config, config_option, cmd_config
from forwarding import cmd_forwarding
from packages import cmd_packages
@@ -23,12 +25,14 @@ from ssh import cmd_ssh
@verbose_option
@config_option
@nowrapper_option
def cli(verbose: bool = False, config_file: str = None, no_wrapper: bool = False, error_shell: bool = False):
def cli(verbose: bool = False, config_file: str = None, wrapper_override: Optional[bool] = None, error_shell: bool = False):
    setup_logging(verbose)
    config.runtime['verbose'] = verbose
    config.runtime['no_wrap'] = no_wrapper
    config.runtime['no_wrap'] = wrapper_override is False
    config.runtime['error_shell'] = error_shell
    config.try_load_file(config_file)
    if wrapper_override:
        enforce_wrap()
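
# Tri-state sketch (added note, not part of the original file):
#   wrapper_override is None  -> keep the configured default behaviour
#   wrapper_override is True  -> re-exec inside the wrapper via enforce_wrap()
#   wrapper_override is False -> set config.runtime['no_wrap'] and stay outside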


def main():
packages/__init__.py
@@ -5,19 +5,25 @@ import os
import shutil
import subprocess
from copy import deepcopy
from joblib import Parallel, delayed
from glob import glob
from shutil import rmtree
from urllib.error import HTTPError
from urllib.request import urlopen
from shutil import copyfileobj
from typing import Iterable, Iterator, Any, Optional

from binfmt import register as binfmt_register
from constants import REPOSITORIES, CROSSDIRECT_PKGS, QEMU_BINFMT_PKGS, GCC_HOSTSPECS, ARCHES, Arch, CHROOT_PATHS, MAKEPKG_CMD
from config import config
from exec.cmd import run_cmd, run_root_cmd
from exec.file import makedir, remove_file
from chroot.build import get_build_chroot, BuildChroot
from distro.distro import PackageInfo, get_kupfer_https, get_kupfer_local
from ssh import run_ssh_command, scp_put_files
from wrapper import enforce_wrap
from wrapper import enforce_wrap, check_programs_wrap, wrap_if_foreign_arch
from utils import git
from binfmt import register as binfmt_register
from .pkgbuild import Pkgbuild, parse_pkgbuild

from .pkgbuild import discover_pkgbuilds, init_pkgbuilds, Pkgbuild
from .device import get_profile_device

pacman_cmd = [
    'pacman',
@@ -28,59 +34,36 @@ pacman_cmd = [
]


def get_makepkg_env():
def get_makepkg_env(arch: Optional[Arch] = None):
    # has to be a function because calls to `config` must be done after config file was read
    threads = config.file['build']['threads'] or multiprocessing.cpu_count()
    return {key: val for key, val in os.environ.items() if not key.split('_', maxsplit=1)[0] in ['CI', 'GITLAB', 'FF']} | {
    env = {key: val for key, val in os.environ.items() if not key.split('_', maxsplit=1)[0] in ['CI', 'GITLAB', 'FF']}
    env |= {
        'LANG': 'C',
        'CARGO_BUILD_JOBS': str(threads),
        'MAKEFLAGS': f"-j{threads}",
        'QEMU_LD_PREFIX': '/usr/aarch64-unknown-linux-gnu',
        'HOME': '/root',
    }


def clone_pkbuilds(pkgbuilds_dir: str, repo_url: str, branch: str, interactive=False, update=True):
    git_dir = os.path.join(pkgbuilds_dir, '.git')
    if not os.path.exists(git_dir):
        logging.info('Cloning branch {branch} from {repo}')
        result = git(['clone', '-b', branch, repo_url, pkgbuilds_dir])
        if result.returncode != 0:
            raise Exception('Error cloning pkgbuilds')
    else:
        result = git(['--git-dir', git_dir, 'branch', '--show-current'], capture_output=True)
        current_branch = result.stdout.decode().strip()
        if current_branch != branch:
            logging.warning(f'pkgbuilds repository is on the wrong branch: {current_branch}, requested: {branch}')
            if interactive and click.confirm('Would you like to switch branches?', default=False):
                result = git(['switch', branch], dir=pkgbuilds_dir)
                if result.returncode != 0:
                    raise Exception('failed switching branches')
    if update:
        if interactive:
            if not click.confirm('Would you like to try updating the PKGBUILDs repo?'):
                return
        result = git(['pull'], pkgbuilds_dir)
        if result.returncode != 0:
            raise Exception('failed to update pkgbuilds')


def init_pkgbuilds(interactive=False):
    pkgbuilds_dir = config.get_path('pkgbuilds')
    repo_url = config.file['pkgbuilds']['git_repo']
    branch = config.file['pkgbuilds']['git_branch']
    clone_pkbuilds(pkgbuilds_dir, repo_url, branch, interactive=interactive, update=False)
    native = config.runtime.arch
    assert native
    if arch and arch != native:
        env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][arch]}'}
    return env
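
# Example (added sketch, not part of the original file): on an x86_64 host
# building for aarch64, GCC_HOSTSPECS[native][arch] is expected to resolve to
# something like 'aarch64-unknown-linux-gnu', giving
# QEMU_LD_PREFIX='/usr/aarch64-unknown-linux-gnu'.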
|
||||
|
||||
def init_prebuilts(arch: Arch, dir: str = None):
|
||||
"""Ensure that all `constants.REPOSITORIES` inside `dir` exist"""
|
||||
prebuilts_dir = dir if dir else config.get_package_dir(arch)
|
||||
os.makedirs(prebuilts_dir, exist_ok=True)
|
||||
prebuilts_dir = dir or config.get_package_dir(arch)
|
||||
makedir(prebuilts_dir)
|
||||
for repo in REPOSITORIES:
|
||||
os.makedirs(os.path.join(prebuilts_dir, repo), exist_ok=True)
|
||||
repo_dir = os.path.join(prebuilts_dir, repo)
|
||||
if not os.path.exists(repo_dir):
|
||||
logging.info(f"Creating local repo {repo} ({arch})")
|
||||
makedir(repo_dir)
|
||||
for ext1 in ['db', 'files']:
|
||||
for ext2 in ['', '.tar.xz']:
|
||||
if not os.path.exists(os.path.join(prebuilts_dir, repo, f'{repo}.{ext1}{ext2}')):
|
||||
result = subprocess.run(
|
||||
result = run_cmd(
|
||||
[
|
||||
'tar',
|
||||
'-czf',
|
||||
@@ -90,64 +73,31 @@ def init_prebuilts(arch: Arch, dir: str = None):
|
||||
],
|
||||
cwd=os.path.join(prebuilts_dir, repo),
|
||||
)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
logging.fatal('Failed to create prebuilt repos')
|
||||
exit(1)
|
||||
raise Exception(f'Failed to create local repo {repo}')


def discover_packages(parallel: bool = True) -> dict[str, Pkgbuild]:
    pkgbuilds_dir = config.get_path('pkgbuilds')
    packages: dict[str, Pkgbuild] = {}
    paths = []
    init_pkgbuilds(interactive=False)
    for repo in REPOSITORIES:
        for dir in os.listdir(os.path.join(pkgbuilds_dir, repo)):
            paths.append(os.path.join(repo, dir))

    native_chroot = setup_build_chroot(config.runtime['arch'], add_kupfer_repos=False)
    results = []

    if parallel:
        chunks = (Parallel(n_jobs=multiprocessing.cpu_count() * 4)(delayed(parse_pkgbuild)(path, native_chroot) for path in paths))
    else:
        chunks = (parse_pkgbuild(path, native_chroot) for path in paths)

    for pkglist in chunks:
        results += pkglist

    logging.debug('Building package dictionary!')
    for package in results:
        for name in [package.name] + package.replaces:
            if name in packages:
                logging.warn(f'Overriding {packages[package.name]} with {package}')
            packages[name] = package

    # This filters the deps to only include the ones that are provided in this repo
    for package in packages.values():
        package.local_depends = package.depends.copy()
        for dep in package.depends.copy():
            found = dep in packages
            for p in packages.values():
                if found:
                    break
                for name in p.names():
                    if dep == name:
                        logging.debug(f'Found {p.name} that provides {dep}')
                        found = True
                        break
            if not found:
                logging.debug(f'Removing {dep} from dependencies')
                package.local_depends.remove(dep)

    return packages


def filter_packages_by_paths(repo: dict[str, Pkgbuild], paths: Iterable[str], allow_empty_results=True) -> Iterable[Pkgbuild]:
def filter_packages(
    paths: Iterable[str],
    repo: Optional[dict[str, Pkgbuild]] = None,
    allow_empty_results=True,
    use_paths=True,
    use_names=True,
) -> Iterable[Pkgbuild]:
    if not allow_empty_results and not paths:
        raise Exception("Can't search for packages: no query given")
    repo = repo or discover_pkgbuilds()
    if 'all' in paths:
        return list(repo.values())
    result = []
    for pkg in repo.values():
        if pkg.path in paths:
        comparison = set()
        if use_paths:
            comparison.add(pkg.path)
        if use_names:
            comparison.add(pkg.name)
        if comparison.intersection(paths):
            result += [pkg]

    if not allow_empty_results and not result:
@@ -265,24 +215,26 @@ def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Itera


def add_file_to_repo(file_path: str, repo_name: str, arch: Arch):
    check_programs_wrap(['repo-add'])
    repo_dir = os.path.join(config.get_package_dir(arch), repo_name)
    pacman_cache_dir = os.path.join(config.get_path('pacman'), arch)
    file_name = os.path.basename(file_path)
    target_file = os.path.join(repo_dir, file_name)

    os.makedirs(repo_dir, exist_ok=True)
    makedir(repo_dir)
    if file_path != target_file:
        logging.debug(f'moving {file_path} to {target_file} ({repo_dir})')
        shutil.copy(
            file_path,
            repo_dir,
        )
        os.unlink(file_path)
        remove_file(file_path)

    # clean up same name package from pacman cache
    cache_file = os.path.join(pacman_cache_dir, file_name)
    if os.path.exists(cache_file):
        os.unlink(cache_file)
        logging.debug(f"Removing cached package file {cache_file}")
        remove_file(cache_file)
    cmd = [
        'repo-add',
        '--remove',
@@ -293,17 +245,26 @@ def add_file_to_repo(file_path: str, repo_name: str, arch: Arch):
        target_file,
    ]
    logging.debug(f'repo: running cmd: {cmd}')
    result = subprocess.run(cmd)
    result = run_cmd(cmd)
    assert isinstance(result, subprocess.CompletedProcess)
    if result.returncode != 0:
        raise Exception(f'Failed to add package {target_file} to repo {repo_name}')
    for ext in ['db', 'files']:
        file = os.path.join(repo_dir, f'{repo_name}.{ext}')
        if os.path.exists(file + '.tar.xz'):
            os.unlink(file)
            remove_file(file)
            shutil.copyfile(file + '.tar.xz', file)
            old = file + '.tar.xz.old'
            if os.path.exists(old):
                os.unlink(old)
                remove_file(old)


def strip_compression_extension(filename: str):
    for ext in ['zst', 'xz', 'gz', 'bz2']:
        if filename.endswith(f'.pkg.tar.{ext}'):
            return filename[:-(len(ext) + 1)]
    logging.warning(f"file {filename} matches no known package extension")
    return filename
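
A note on the helper above: it only strips the compression suffix, deliberately leaving the `.pkg.tar` stem intact so later checks can match on it. A minimal illustration, assuming the function above is in scope (the filenames are made up):

    assert strip_compression_extension('hello-0.1-1-any.pkg.tar.zst') == 'hello-0.1-1-any.pkg.tar'
    # unknown extensions are logged as a warning and returned unchanged
    assert strip_compression_extension('notes.txt') == 'notes.txt'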

def add_package_to_repo(package: Pkgbuild, arch: Arch):
@@ -312,15 +273,66 @@ def add_package_to_repo(package: Pkgbuild, arch: Arch):

    files = []
    for file in os.listdir(pkgbuild_dir):
        stripped_name = strip_compression_extension(file)
        # Forced extension by makepkg.conf
        if file.endswith('.pkg.tar.xz') or file.endswith('.pkg.tar.zst'):
            repo_dir = os.path.join(config.get_package_dir(arch), package.repo)
            files.append(os.path.join(repo_dir, file))
            add_file_to_repo(os.path.join(pkgbuild_dir, file), package.repo, arch)
        if not stripped_name.endswith('.pkg.tar'):
            continue

        repo_file = os.path.join(config.get_package_dir(arch), package.repo, file)
        files.append(repo_file)
        add_file_to_repo(os.path.join(pkgbuild_dir, file), package.repo, arch)

        # copy any-arch packages to other repos as well
        if stripped_name.endswith('any.pkg.tar'):
            for repo_arch in ARCHES:
                if repo_arch == arch:
                    continue
                copy_target = os.path.join(config.get_package_dir(repo_arch), package.repo, file)
                shutil.copy(repo_file, copy_target)
                add_file_to_repo(copy_target, package.repo, repo_arch)

    return files


def check_package_version_built(package: Pkgbuild, arch: Arch) -> bool:
def try_download_package(dest_file_path: str, package: Pkgbuild, arch: Arch) -> bool:
    logging.debug(f"checking if we can download {package.name}")
    filename = os.path.basename(dest_file_path)
    pkgname = package.name
    repo_name = package.repo
    repos = get_kupfer_https(arch, scan=True).repos
    if repo_name not in repos:
        logging.warning(f"Repository {repo_name} is not a known HTTPS repo")
        return False
    repo = repos[repo_name]
    if pkgname not in repo.packages:
        logging.warning(f"Package {pkgname} not found in remote repos, building instead.")
        return False
    repo_pkg: PackageInfo = repo.packages[pkgname]
    if repo_pkg.version != package.version:
        logging.debug(f"Package {pkgname} versions differ: local: {package.version}, remote: {repo_pkg.version}. Building instead.")
        return False
    if repo_pkg.filename != filename:
        logging.debug(f"package filenames don't match: local: {filename}, remote: {repo_pkg.filename}")
        return False
    url = f"{repo.resolve_url()}/{filename}"
    assert url
    try:
        logging.info(f"Trying to download package {url}")
        makedir(os.path.dirname(dest_file_path))
        with urlopen(url) as fsrc, open(dest_file_path, 'wb') as fdst:
            copyfileobj(fsrc, fdst)
            logging.info(f"{filename} downloaded from repos")
            return True
    except HTTPError as e:
        if e.code == 404:
            logging.debug(f"remote package {filename} nonexistent on server: {url}")
        else:
            logging.error(f"remote package {filename} failed to download ({e.code}): {url}: {e}")
        return False
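
try_download_package only substitutes a download for a build when repository, package name, version and filename all match the remote index; any mismatch falls through to building locally. A condensed sketch of that gating logic, using a hypothetical plain-dict index instead of the real repo classes:

    def can_reuse_remote(index: dict, repo: str, name: str, version: str, filename: str) -> bool:
        # index is a stand-in {repo: {pkgname: (version, filename)}} mapping
        if repo not in index or name not in index[repo]:
            return False
        # any version or filename mismatch means: build locally instead
        return index[repo][name] == (version, filename)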

def check_package_version_built(package: Pkgbuild, arch: Arch, try_download: bool = False) -> bool:
    enforce_wrap()
    native_chroot = setup_build_chroot(config.runtime['arch'])
    config_path = '/' + native_chroot.write_makepkg_conf(
        target_arch=arch,
@@ -343,16 +355,49 @@ def check_package_version_built(package: Pkgbuild, arch: Arch) -> bool:
    if result.returncode != 0:
        raise Exception(f'Failed to get package list for {package.path}:' + '\n' + result.stdout.decode() + '\n' + result.stderr.decode())

    missing = False
    missing = True
    for line in result.stdout.decode('utf-8').split('\n'):
        if line != "":
            file = os.path.join(config.get_package_dir(arch), package.repo, os.path.basename(line))
            logging.debug(f'Checking if {file} is built')
            if os.path.exists(file):
                add_file_to_repo(file, repo_name=package.repo, arch=arch)
        if not line:
            continue
        basename = os.path.basename(line)
        file = os.path.join(config.get_package_dir(arch), package.repo, basename)
        filename_stripped = strip_compression_extension(file)
        logging.debug(f'Checking if {file} is built')
        if not filename_stripped.endswith('.pkg.tar'):
            logging.debug(f'skipping unknown file extension {basename}')
            continue
        if os.path.exists(file) or (try_download and try_download_package(file, package, arch)):
            missing = False
            add_file_to_repo(file, repo_name=package.repo, arch=arch)
        # copy arch=(any) packages to all arches
        if filename_stripped.endswith('any.pkg.tar'):
            logging.debug("any-arch pkg detected")
            target_repo_file = os.path.join(config.get_package_dir(arch), package.repo, basename)
            if os.path.exists(target_repo_file):
                missing = False
            else:
                missing = True
                # we have to check if another arch's repo holds our any-arch pkg
                for repo_arch in ARCHES:
                    if repo_arch == arch:
                        continue  # we already checked that
                    other_repo_path = os.path.join(config.get_package_dir(repo_arch), package.repo, basename)
                    if os.path.exists(other_repo_path):
                        missing = False
                        logging.info(f"package {file} found in {repo_arch} repos, copying to {arch}")
                        shutil.copyfile(other_repo_path, target_repo_file)
                        add_file_to_repo(target_repo_file, package.repo, arch)
                        break

            if os.path.exists(target_repo_file):
                # copy to other arches if they don't have it
                for repo_arch in ARCHES:
                    if repo_arch == arch:
                        continue  # we already have that
                    copy_target = os.path.join(config.get_package_dir(repo_arch), package.repo, basename)
                    if not os.path.exists(copy_target):
                        logging.info(f"copying to {copy_target}")
                        shutil.copyfile(target_repo_file, copy_target)
                        add_file_to_repo(copy_target, package.repo, repo_arch)
    return not missing


@@ -362,10 +407,13 @@ def setup_build_chroot(
    add_kupfer_repos: bool = True,
    clean_chroot: bool = False,
) -> BuildChroot:
    if arch != config.runtime['arch']:
        wrap_if_foreign_arch(arch)
        build_enable_qemu_binfmt(arch)
    init_prebuilts(arch)
    chroot = get_build_chroot(arch, add_kupfer_repos=add_kupfer_repos)
    chroot.mount_packages()
    logging.info(f'Initializing {arch} build chroot')
    logging.debug(f'Initializing {arch} build chroot')
    chroot.initialize(reset=clean_chroot)
    chroot.write_pacman_conf()  # in case it was initialized with different repos
    chroot.activate()
@@ -376,8 +424,16 @@ def setup_build_chroot(
    return chroot


def setup_sources(package: Pkgbuild, chroot: BuildChroot, makepkg_conf_path='/etc/makepkg.conf', pkgbuilds_dir: str = None):
    pkgbuilds_dir = pkgbuilds_dir if pkgbuilds_dir else CHROOT_PATHS['pkgbuilds']
def setup_git_insecure_paths(chroot: BuildChroot):
    chroot.run_cmd(
        ["git", "config", "--global", "--add", "safe.directory", "'*'"],
        inner_env={
            'HOME': '/root'
        },
    ).check_returncode()  # type: ignore[union-attr]


def setup_sources(package: Pkgbuild, chroot: BuildChroot, makepkg_conf_path='/etc/makepkg.conf'):
    makepkg_setup_args = [
        '--config',
        makepkg_conf_path,
@@ -388,7 +444,12 @@ def setup_sources(package: Pkgbuild, chroot: BuildChroot, makepkg_conf_path='/et
    ]

    logging.info(f'Setting up sources for {package.path} in {chroot.name}')
    result = chroot.run_cmd(MAKEPKG_CMD + makepkg_setup_args, cwd=os.path.join(CHROOT_PATHS['pkgbuilds'], package.path))
    setup_git_insecure_paths(chroot)
    result = chroot.run_cmd(
        MAKEPKG_CMD + makepkg_setup_args,
        cwd=os.path.join(CHROOT_PATHS['pkgbuilds'], package.path),
        inner_env=get_makepkg_env(chroot.arch),
    )
    assert isinstance(result, subprocess.CompletedProcess)
    if result.returncode != 0:
        raise Exception(f'Failed to check sources for {package.path}')
@@ -426,7 +487,7 @@ def build_package(
    logging.info(f'Cross-compiling {package.path}')
    build_root = native_chroot
    makepkg_compile_opts += ['--nodeps']
    env = deepcopy(get_makepkg_env())
    env = deepcopy(get_makepkg_env(arch))
    if enable_ccache:
        env['PATH'] = f"/usr/lib/ccache:{env['PATH']}"
    logging.info('Setting up dependencies for cross-compilation')
@@ -445,7 +506,7 @@ def build_package(
    logging.info(f'Host-compiling {package.path}')
    build_root = target_chroot
    makepkg_compile_opts += ['--syncdeps']
    env = deepcopy(get_makepkg_env())
    env = deepcopy(get_makepkg_env(arch))
    if foreign_arch and enable_crossdirect and package.name not in CROSSDIRECT_PKGS:
        env['PATH'] = f"/native/usr/lib/crossdirect/{arch}:{env['PATH']}"
        target_chroot.mount_crossdirect(native_chroot)
@@ -460,6 +521,7 @@ def build_package(
    if failed_deps:
        raise Exception(f'Dependencies failed to install: {failed_deps}')

    setup_git_insecure_paths(build_root)
    makepkg_conf_absolute = os.path.join('/', makepkg_conf_path)
    setup_sources(package, build_root, makepkg_conf_path=makepkg_conf_absolute)

@@ -471,16 +533,42 @@ def build_package(
    raise Exception(f'Failed to compile package {package.path}')


def get_unbuilt_package_levels(repo: dict[str, Pkgbuild], packages: Iterable[Pkgbuild], arch: Arch, force: bool = False) -> list[set[Pkgbuild]]:
    package_levels = generate_dependency_chain(repo, packages)
def get_dependants(
    repo: dict[str, Pkgbuild],
    packages: Iterable[Pkgbuild],
    recursive: bool = True,
) -> set[Pkgbuild]:
    names = set([pkg.name for pkg in packages])
    to_add = set[Pkgbuild]()
    for pkg in repo.values():
        if set.intersection(names, set(pkg.depends)):
            to_add.add(pkg)
    if recursive and to_add:
        to_add.update(get_dependants(repo, to_add))
    return to_add
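
get_dependants walks the reverse dependency edges transitively (and, like the implementation, assumes the dependency graph is acyclic). A toy run with stand-in objects instead of real Pkgbuilds, assuming the function above is in scope:

    from types import SimpleNamespace

    alpha = SimpleNamespace(name='alpha', depends=[])
    beta = SimpleNamespace(name='beta', depends=['alpha'])
    gamma = SimpleNamespace(name='gamma', depends=['beta'])
    repo = {p.name: p for p in (alpha, beta, gamma)}

    # rebuilding alpha pulls in beta (direct dependant) and gamma (transitive)
    assert {p.name for p in get_dependants(repo, [alpha])} == {'beta', 'gamma'}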

def get_unbuilt_package_levels(
    packages: Iterable[Pkgbuild],
    arch: Arch,
    repo: Optional[dict[str, Pkgbuild]] = None,
    force: bool = False,
    rebuild_dependants: bool = False,
    try_download: bool = False,
) -> list[set[Pkgbuild]]:
    repo = repo or discover_pkgbuilds()
    dependants = set[Pkgbuild]()
    if rebuild_dependants:
        dependants = get_dependants(repo, packages)
    package_levels = generate_dependency_chain(repo, set(packages).union(dependants))
    build_names = set[str]()
    build_levels = list[set[Pkgbuild]]()
    i = 0
    for level_packages in package_levels:
        level = set[Pkgbuild]()
        for package in level_packages:
            if ((not check_package_version_built(package, arch)) or set.intersection(set(package.depends), set(build_names)) or
                    (force and package in packages)):
            if ((force and package in packages) or (rebuild_dependants and package in dependants) or
                    not check_package_version_built(package, arch, try_download)):
                level.add(package)
                build_names.update(package.names())
        if level:
@@ -491,16 +579,26 @@ def get_unbuilt_package_levels(repo: dict[str, Pkgbuild], packages: Iterable[Pkg


def build_packages(
    repo: dict[str, Pkgbuild],
    packages: Iterable[Pkgbuild],
    arch: Arch,
    repo: Optional[dict[str, Pkgbuild]] = None,
    force: bool = False,
    rebuild_dependants: bool = False,
    try_download: bool = False,
    enable_crosscompile: bool = True,
    enable_crossdirect: bool = True,
    enable_ccache: bool = True,
    clean_chroot: bool = False,
):
    build_levels = get_unbuilt_package_levels(repo, packages, arch, force=force)
    init_prebuilts(arch)
    build_levels = get_unbuilt_package_levels(
        packages,
        arch,
        repo=repo,
        force=force,
        rebuild_dependants=rebuild_dependants,
        try_download=try_download,
    )

    if not build_levels:
        logging.info('Everything built already')
@@ -525,8 +623,10 @@ def build_packages(
def build_packages_by_paths(
    paths: Iterable[str],
    arch: Arch,
    repo: dict[str, Pkgbuild],
    repo: Optional[dict[str, Pkgbuild]] = None,
    force=False,
    rebuild_dependants: bool = False,
    try_download: bool = False,
    enable_crosscompile: bool = True,
    enable_crossdirect: bool = True,
    enable_ccache: bool = True,
@@ -537,12 +637,14 @@ def build_packages_by_paths(

    for _arch in set([arch, config.runtime['arch']]):
        init_prebuilts(_arch)
    packages = filter_packages_by_paths(repo, paths, allow_empty_results=False)
    packages = filter_packages(paths, repo=repo, allow_empty_results=False)
    return build_packages(
        repo,
        packages,
        arch,
        repo=repo,
        force=force,
        rebuild_dependants=rebuild_dependants,
        try_download=try_download,
        enable_crosscompile=enable_crosscompile,
        enable_crossdirect=enable_crossdirect,
        enable_ccache=enable_ccache,
@@ -550,27 +652,35 @@ def build_packages_by_paths(
    )


def build_enable_qemu_binfmt(arch: Arch, repo: dict[str, Pkgbuild] = None):
_qemu_enabled: dict[Arch, bool] = {arch: False for arch in ARCHES}


def build_enable_qemu_binfmt(arch: Arch, repo: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True):
    if arch not in ARCHES:
        raise Exception(f'Unknown architecture "{arch}". Choices: {", ".join(ARCHES)}')
    logging.info('Installing qemu-user (building if necessary)')
    enforce_wrap()
    if not repo:
        repo = discover_packages()
    if lazy and _qemu_enabled[arch]:
        return
    native = config.runtime['arch']
    if arch == native:
        return
    wrap_if_foreign_arch(arch)
    # build qemu-user, binfmt, crossdirect
    chroot = setup_build_chroot(native)
    build_packages_by_paths(
        ['cross/' + pkg for pkg in CROSSDIRECT_PKGS],
        CROSSDIRECT_PKGS,
        native,
        repo,
        repo=repo,
        try_download=True,
        enable_crosscompile=False,
        enable_crossdirect=False,
        enable_ccache=False,
    )
    subprocess.run(['pacman', '-Syy', '--noconfirm', '--needed', '--config', os.path.join(chroot.path, 'etc/pacman.conf')] + QEMU_BINFMT_PKGS)
    crossrepo = get_kupfer_local(native, in_chroot=False, scan=True).repos['cross'].packages
    pkgfiles = [os.path.join(crossrepo[pkg].resolved_url.split('file://')[1]) for pkg in QEMU_BINFMT_PKGS]  # type: ignore
    run_root_cmd(['pacman', '-U', '--noconfirm', '--needed'] + pkgfiles)
    if arch != native:
        binfmt_register(arch)
    _qemu_enabled[arch] = True


@click.group(name='packages')
@@ -582,42 +692,49 @@ def cmd_packages():
@click.option('--non-interactive', is_flag=True)
def cmd_update(non_interactive: bool = False):
    """Update PKGBUILDs git repo"""
    enforce_wrap()
    init_pkgbuilds(interactive=not non_interactive)


@cmd_packages.command(name='build')
@click.option('--force', is_flag=True, default=False, help='Rebuild even if package is already built')
@click.option('--arch', default=None, help="The CPU architecture to build for")
@click.option('--arch', default=None, required=False, type=click.Choice(ARCHES), help="The CPU architecture to build for")
@click.option('--rebuild-dependants', is_flag=True, default=False, help='Rebuild packages that depend on packages that will be [re]built')
@click.option('--no-download', is_flag=True, default=False, help="Don't try downloading packages from online repos before building")
@click.argument('paths', nargs=-1)
def cmd_build(paths: list[str], force=False, arch=None):
def cmd_build(paths: list[str], force=False, arch: Optional[Arch] = None, rebuild_dependants: bool = False, no_download: bool = False):
    """
    Build packages by paths.
    Build packages (and dependencies) by paths as required.

    The paths are specified relative to the PKGBUILDs dir, e.g. "cross/crossdirect".

    Multiple paths may be specified as separate arguments.

    Packages that aren't built already will be downloaded from HTTPS repos unless --no-download is passed,
    if an exact version match exists on the server.
    """
    build(paths, force, arch)
    build(paths, force, arch=arch, rebuild_dependants=rebuild_dependants, try_download=not no_download)


def build(paths: Iterable[str], force: bool, arch: Optional[Arch]):
def build(
    paths: Iterable[str],
    force: bool,
    arch: Optional[Arch] = None,
    rebuild_dependants: bool = False,
    try_download: bool = False,
):
    # TODO: arch = config.get_profile()...
    arch = arch or 'aarch64'
    arch = arch or get_profile_device(hint_or_set_arch=True).arch

    if arch not in ARCHES:
        raise Exception(f'Unknown architecture "{arch}". Choices: {", ".join(ARCHES)}')
    enforce_wrap()
    config.enforce_config_loaded()
    repo: dict[str, Pkgbuild] = discover_packages()
    if arch != config.runtime['arch']:
        build_enable_qemu_binfmt(arch, repo=repo)

    return build_packages_by_paths(
        paths,
        arch,
        repo,
        force=force,
        rebuild_dependants=rebuild_dependants,
        try_download=try_download,
        enable_crosscompile=config.file['build']['crosscompile'],
        enable_crossdirect=config.file['build']['crossdirect'],
        enable_ccache=config.file['build']['ccache'],
@@ -627,19 +744,32 @@ def build(paths: Iterable[str], force: bool, arch: Optional[Arch]):

@cmd_packages.command(name='sideload')
@click.argument('paths', nargs=-1)
def cmd_sideload(paths: Iterable[str]):
@click.option('--arch', default=None, required=False, type=click.Choice(ARCHES), help="The CPU architecture to build for")
@click.option('-B', '--no-build', is_flag=True, default=False, help="Don't try to build packages, just copy and install")
def cmd_sideload(paths: Iterable[str], arch: Optional[Arch] = None, no_build: bool = False):
    """Build packages, copy to the device via SSH and install them"""
    files = build(paths, True, None)
    scp_put_files(files, '/tmp')
    arch = arch or get_profile_device(hint_or_set_arch=True).arch
    if not no_build:
        build(paths, False, arch=arch, try_download=True)
    files = [
        pkg.resolved_url.split('file://')[1]
        for pkg in get_kupfer_local(arch=arch, scan=True, in_chroot=False).get_packages().values()
        if pkg.resolved_url and pkg.name in paths
    ]
    logging.debug(f"Sideload: Found package files: {files}")
    if not files:
        logging.fatal("No packages matched")
        return
    scp_put_files(files, '/tmp').check_returncode()
    run_ssh_command([
        'sudo',
        '-S',
        'pacman',
        '-U',
    ] + [os.path.join('/tmp', os.path.basename(file)) for file in files] + [
        '--noconfirm',
        '--overwrite=*',
    ])
        '--overwrite=\\*',
    ],
                    alloc_tty=True).check_returncode()


@cmd_packages.command(name='clean')
@@ -648,7 +778,6 @@ def cmd_sideload(paths: Iterable[str]):
@click.argument('what', type=click.Choice(['all', 'src', 'pkg']), nargs=-1)
def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = False):
    """Remove files and directories not tracked in PKGBUILDs.git. Passing in an empty `what` defaults it to `['all']`"""
    enforce_wrap()
    if noop:
        logging.debug('Running in noop mode!')
    if force:
@@ -657,6 +786,7 @@ def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = F
    logging.debug(f'Clearing {what} from PKGBUILDs')
    pkgbuilds = config.get_path('pkgbuilds')
    if 'all' in what:
        check_programs_wrap(['git'])
        warning = "Really reset PKGBUILDs to git state completely?\nThis will erase any untracked changes to your PKGBUILDs directory."
        if not (noop or force or click.confirm(warning)):
            return
@@ -688,15 +818,15 @@ def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = F

    for dir in dirs:
        if not noop:
            rmtree(dir)
            remove_file(dir, recursive=True)


@cmd_packages.command(name='list')
def cmd_list():
    enforce_wrap()
    logging.info('Discovering packages.')
    packages = discover_packages()
    logging.info('Done! Pkgbuilds:')
    packages = discover_pkgbuilds()
    logging.info(f'Done! {len(packages)} Pkgbuilds:')
    for p in set(packages.values()):
        print(
            f'name: {p.name}; ver: {p.version}; provides: {p.provides}; replaces: {p.replaces}; local_depends: {p.local_depends}; depends: {p.depends}'
@@ -707,9 +837,16 @@ def cmd_list():
@click.argument('paths', nargs=-1)
def cmd_check(paths):
    """Check that specified PKGBUILDs are formatted correctly"""
    enforce_wrap()

    def check_quoteworthy(s: str) -> bool:
        quoteworthy = ['"', "'", "$", " ", ";", "&", "<", ">", "*", "?"]
        for symbol in quoteworthy:
            if symbol in s:
                return True
        return False

    paths = list(paths)
    packages = filter_packages_by_paths(discover_packages(), paths, allow_empty_results=False)
    packages = filter_packages(paths, allow_empty_results=False)

    for package in packages:
        name = package.name
@@ -814,11 +951,11 @@ def cmd_check(paths):
            formatted = False
            reason = 'Multiline variables should be indented with 4 spaces'

        if '"' in line and '$' not in line and ' ' not in line and ';' not in line:
        if '"' in line and not check_quoteworthy(line):
            formatted = False
            reason = 'Found literal " although no "$", " " or ";" was found in the line justifying the usage of a literal "'
            reason = 'Found literal " although no special character was found in the line to justify the usage of a literal "'

        if '\'' in line:
        if "'" in line and not '"' in line:
            formatted = False
            reason = 'Found literal \' although either a literal " or no quotes should be used'


117	packages/device.py	Normal file
@@ -0,0 +1,117 @@
import logging
import os

from typing import Optional

from config import config
from constants import Arch, ARCHES
from config.scheme import DataClass, munchclass
from .pkgbuild import discover_pkgbuilds, get_pkgbuild_by_path, _pkgbuilds_cache, Pkgbuild

DEVICE_DEPRECATIONS = {
    "oneplus-enchilada": "sdm845-oneplus-enchilada",
    "oneplus-fajita": "sdm845-oneplus-fajita",
    "xiaomi-beryllium-ebbg": "sdm845-sdm845-xiaomi-beryllium-ebbg",
    "xiaomi-beryllium-tianma": "sdm845-sdm845-xiaomi-tianma",
    "bq-paella": "msm8916-bq-paella",
}


@munchclass()
class Device(DataClass):
    name: str
    arch: Arch
    package: Pkgbuild

    def parse_deviceinfo(self):
        pass


def check_devicepkg_name(name: str, log_level: Optional[int] = None):
    valid = True
    if not name.startswith('device-'):
        valid = False
        if log_level is not None:
            logging.log(log_level, f'invalid device package name "{name}": doesn\'t start with "device-"')
    if name.endswith('-common'):
        valid = False
        if log_level is not None:
            logging.log(log_level, f'invalid device package name "{name}": ends with "-common"')
    return valid
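
For illustration, check_devicepkg_name accepts only concrete device packages (the names below are hypothetical):

    assert check_devicepkg_name('device-sdm845-oneplus-enchilada') is True
    assert check_devicepkg_name('sdm845-oneplus-enchilada') is False      # missing "device-" prefix
    assert check_devicepkg_name('device-sdm845-oneplus-common') is False  # "-common" bases are not devices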

def parse_device_pkg(pkgbuild: Pkgbuild) -> Device:
    if len(pkgbuild.arches) != 1:
        raise Exception(f"{pkgbuild.name}: Device package must have exactly one arch, but has {pkgbuild.arches}")
    arch = pkgbuild.arches[0]
    if arch == 'any' or arch not in ARCHES:
        raise Exception(f'unknown arch for device package: {arch}')
    if pkgbuild.repo != 'device':
        logging.warning(f'device package {pkgbuild.name} is in unexpected repo "{pkgbuild.repo}", expected "device"')
    name = pkgbuild.name
    prefix = 'device-'
    if name.startswith(prefix):
        name = name[len(prefix):]
    return Device(name=name, arch=arch, package=pkgbuild)


_device_cache: dict[str, Device] = {}
_device_cache_populated: bool = False


def get_devices(pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True) -> dict[str, Device]:
    global _device_cache, _device_cache_populated
    use_cache = _device_cache_populated and lazy
    if not use_cache:
        if not pkgbuilds:
            pkgbuilds = discover_pkgbuilds(lazy=lazy)
        _device_cache.clear()
        for pkgbuild in pkgbuilds.values():
            if not (pkgbuild.repo == 'device' and check_devicepkg_name(pkgbuild.name, log_level=None)):
                continue
            dev = parse_device_pkg(pkgbuild)
            _device_cache[dev.name] = dev
        _device_cache_populated = True
    return _device_cache.copy()
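
get_devices pays the full PKGBUILD scan at most once per process; with lazy=True, later calls serve a copy of the module-level cache so callers can't mutate the cache itself. The same pattern in isolation, as a generic sketch (not the actual implementation):

    _cache: dict[str, object] = {}
    _populated = False

    def cached_scan(scan) -> dict[str, object]:
        global _populated
        if not _populated:
            _cache.clear()
            _cache.update(scan())  # expensive discovery runs once
            _populated = True
        return _cache.copy()       # callers get a snapshot, not the cache itself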

def get_device(name: str, pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, scan_all=False) -> Device:
    global _device_cache, _device_cache_populated
    assert lazy or pkgbuilds
    if name in DEVICE_DEPRECATIONS:
        warning = f"Deprecated device {name}"
        replacement = DEVICE_DEPRECATIONS[name]
        if replacement:
            warning += (f': Device has been renamed to {replacement}! Please adjust your profile config!\n'
                        'This will become an error in a future version!')
            name = replacement
        logging.warning(warning)
    if lazy and name in _device_cache:
        return _device_cache[name]
    if scan_all:
        devices = get_devices(pkgbuilds=pkgbuilds, lazy=lazy)
        if name not in devices:
            raise Exception(f'Unknown device {name}!')
        return devices[name]
    else:
        pkgname = f'device-{name}'
        if pkgbuilds:
            if pkgname not in pkgbuilds:
                raise Exception(f'Unknown device {name}!')
            pkgbuild = pkgbuilds[pkgname]
        else:
            if lazy and pkgname in _pkgbuilds_cache:
                pkgbuild = _pkgbuilds_cache[pkgname]
            else:
                relative_path = os.path.join('device', pkgname)
                assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_path))
                pkgbuild = [p for p in get_pkgbuild_by_path(relative_path, lazy=lazy, _config=config) if p.name == pkgname][0]
        device = parse_device_pkg(pkgbuild)
        if lazy:
            _device_cache[name] = device
        return device


def get_profile_device(profile_name: Optional[str] = None, hint_or_set_arch: bool = False):
    profile = config.enforce_profile_device_set(profile_name, hint_or_set_arch=hint_or_set_arch)
    return get_device(profile.device)
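
get_profile_device is the single hop from profile config to Device, and with hint_or_set_arch=True it is what lets the build commands default their target architecture. Hypothetical usage:

    device = get_profile_device()  # Device of the current profile
    arch = get_profile_device(hint_or_set_arch=True).arch  # e.g. 'aarch64'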

@@ -1,55 +1,206 @@
from copy import deepcopy
from __future__ import annotations

import click
import logging
import multiprocessing
import os
import subprocess

from chroot import Chroot
from constants import CHROOT_PATHS, MAKEPKG_CMD
from joblib import Parallel, delayed
from typing import Optional

from config import config, ConfigStateHolder
from constants import REPOSITORIES
from exec.cmd import run_cmd
from constants import Arch, MAKEPKG_CMD
from distro.package import PackageInfo
from logger import setup_logging
from utils import git
from wrapper import check_programs_wrap


def clone_pkbuilds(pkgbuilds_dir: str, repo_url: str, branch: str, interactive=False, update=True):
    check_programs_wrap(['git'])
    git_dir = os.path.join(pkgbuilds_dir, '.git')
    if not os.path.exists(git_dir):
        logging.info(f'Cloning branch {branch} from {repo_url}')
        result = git(['clone', '-b', branch, repo_url, pkgbuilds_dir])
        if result.returncode != 0:
            raise Exception('Error cloning pkgbuilds')
    else:
        result = git(['--git-dir', git_dir, 'branch', '--show-current'], capture_output=True)
        current_branch = result.stdout.decode().strip()
        if current_branch != branch:
            logging.warning(f'pkgbuilds repository is on the wrong branch: {current_branch}, requested: {branch}')
            if interactive and click.confirm('Would you like to switch branches?', default=False):
                result = git(['switch', branch], dir=pkgbuilds_dir)
                if result.returncode != 0:
                    raise Exception('failed switching branches')
        if update:
            if interactive:
                if not click.confirm('Would you like to try updating the PKGBUILDs repo?'):
                    return
            result = git(['pull'], pkgbuilds_dir)
            if result.returncode != 0:
                raise Exception('failed to update pkgbuilds')


def init_pkgbuilds(interactive=False):
    pkgbuilds_dir = config.get_path('pkgbuilds')
    repo_url = config.file['pkgbuilds']['git_repo']
    branch = config.file['pkgbuilds']['git_branch']
    clone_pkbuilds(pkgbuilds_dir, repo_url, branch, interactive=interactive, update=False)


class Pkgbuild(PackageInfo):
    name: str
    version: str
    arches: list[Arch]
    depends: list[str]
    provides: list[str]
    replaces: list[str]
    local_depends: list[str]
    repo = ''
    mode = ''
    path = ''
    pkgver = ''
    pkgrel = ''
    repo: str
    mode: str
    path: str
    pkgver: str
    pkgrel: str
    sources_refreshed: bool

    def __init__(
        self,
        relative_path: str,
        arches: list[Arch] = [],
        depends: list[str] = [],
        provides: list[str] = [],
        replaces: list[str] = [],
        repo: Optional[str] = None,
        sources_refreshed: bool = False,
    ) -> None:
        """
        Create new Pkgbuild representation for file located at `{relative_path}/PKGBUILD`.
        `relative_path` will be stored in `self.path`.
        """
        self.name = os.path.basename(relative_path)
        self.version = ''
        self.arches = list(arches)
        self.depends = list(depends)
        self.provides = list(provides)
        self.replaces = list(replaces)
        self.local_depends = []
        self.repo = repo or ''
        self.mode = ''
        self.path = relative_path
        self.depends = deepcopy(depends)
        self.provides = deepcopy(provides)
        self.replaces = deepcopy(replaces)
        self.pkgver = ''
        self.pkgrel = ''
        self.sources_refreshed = sources_refreshed

    def __repr__(self):
        return f'Pkgbuild({self.name},{repr(self.path)},{self.version},{self.mode})'
        return ','.join([
            'Pkgbuild(' + self.name,
            repr(self.path),
            self.version + ("🔄" if self.sources_refreshed else ""),
            self.mode + ')',
        ])

    def names(self):
        return list(set([self.name] + self.provides + self.replaces))

    def update_version(self):
        """updates `self.version` from `self.pkgver` and `self.pkgrel`"""
        self.version = f'{self.pkgver}-{self.pkgrel}'

    def update(self, pkg: Pkgbuild):
        self.version = pkg.version
        self.arches = list(pkg.arches)
        self.depends = list(pkg.depends)
        self.provides = list(pkg.provides)
        self.replaces = list(pkg.replaces)
        self.local_depends = list(pkg.local_depends)
        self.repo = pkg.repo
        self.mode = pkg.mode
        self.path = pkg.path
        self.pkgver = pkg.pkgver
        self.pkgrel = pkg.pkgrel
        self.sources_refreshed = self.sources_refreshed or pkg.sources_refreshed
        self.update_version()

    def refresh_sources(self):
        raise NotImplementedError()


class Pkgbase(Pkgbuild):
    subpackages: list[Pkgbuild]
    subpackages: list[SubPkgbuild]

    def __init__(self, relative_path: str, subpackages: list[Pkgbuild] = [], **args):
        self.subpackages = deepcopy(subpackages)
    def __init__(self, relative_path: str, subpackages: list[SubPkgbuild] = [], **args):
        self.subpackages = list(subpackages)
        super().__init__(relative_path, **args)

    def update(self, pkg: Pkgbuild):
        if not isinstance(pkg, Pkgbase):
            raise Exception(f"Tried to update pkgbase {self.name} with non-base pkg {pkg}")
        Pkgbuild.update(self, pkg)
        sub_dict = {p.name: p for p in self.subpackages}
        self.subpackages.clear()
        for new_pkg in pkg.subpackages:
            name = new_pkg.name
            if name not in sub_dict:
                sub_dict[name] = new_pkg
            else:
                sub_dict[name].update(new_pkg)
            updated = sub_dict[name]
            updated.sources_refreshed = self.sources_refreshed
            self.subpackages.append(updated)

def parse_pkgbuild(relative_pkg_dir: str, native_chroot: Chroot) -> list[Pkgbuild]:
    def refresh_sources(self, lazy: bool = True):
        '''
        Reloads the pkgbuild from disk.
        Does **NOT** actually perform the makepkg action to refresh the pkgver() first!
        '''
        if lazy and self.sources_refreshed:
            return
        parsed = parse_pkgbuild(self.path, sources_refreshed=True)
        basepkgs = [p for p in parsed if isinstance(p, Pkgbase)]
        if not len(basepkgs) == 1:
            raise Exception(f"error refreshing {self.name}: wrong number of base packages found: {basepkgs}")
        self.sources_refreshed = True
        self.update(basepkgs[0])


class SubPkgbuild(Pkgbuild):
    pkgbase: Pkgbase

    def __init__(self, name: str, pkgbase: Pkgbase):

        self.name = name
        self.pkgbase = pkgbase

        self.sources_refreshed = False
        self.update(pkgbase)

        self.provides = []
        self.replaces = []

    def refresh_sources(self, lazy: bool = True):
        assert self.pkgbase
        self.pkgbase.refresh_sources(lazy=lazy)
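
A split PKGBUILD thus yields one Pkgbase plus one SubPkgbuild per pkgname, with each subpackage delegating refresh_sources() back to its base. A minimal sketch of the relationship, assuming the classes above are in scope (the package names are made up):

    base = Pkgbase('main/hello', arches=['aarch64'])
    base.pkgver, base.pkgrel = '1.0', '1'
    base.update_version()
    sub = SubPkgbuild('hello-docs', base)  # copies version/repo/depends from the base
    assert sub.pkgbase is base
    assert sub.version == base.version == '1.0-1'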

def parse_pkgbuild(relative_pkg_dir: str, _config: Optional[ConfigStateHolder] = None, sources_refreshed: bool = False) -> list[Pkgbuild]:
    """
    Since this function may run in a different subprocess, the config has to be passed in via the `_config` parameter.
    """
    global config
    if _config:
        config = _config
    setup_logging(verbose=config.runtime['verbose'], log_setup=False)  # different thread needs log setup.
    logging.info(f"Parsing PKGBUILD for {relative_pkg_dir}")
    pkgbuilds_dir = config.get_path('pkgbuilds')
    pkgdir = os.path.join(pkgbuilds_dir, relative_pkg_dir)
    filename = os.path.join(pkgdir, 'PKGBUILD')
    logging.debug(f"Parsing {filename}")
    mode = None
    with open(os.path.join(native_chroot.get_path(CHROOT_PATHS['pkgbuilds']), relative_pkg_dir, 'PKGBUILD'), 'r') as file:
    with open(filename, 'r') as file:
        for line in file.read().split('\n'):
            if line.startswith('_mode='):
                mode = line.split('=')[1]
@@ -58,18 +209,18 @@ def parse_pkgbuild(relative_pkg_dir: str, native_chroot: Chroot) -> list[Pkgbuil
        raise Exception((f'{relative_pkg_dir}/PKGBUILD has {"no" if mode is None else "an invalid"} mode configured') +
                        (f': "{mode}"' if mode is not None else ''))

    base_package = Pkgbase(relative_pkg_dir)
    base_package = Pkgbase(relative_pkg_dir, sources_refreshed=sources_refreshed)
    base_package.mode = mode
    base_package.repo = relative_pkg_dir.split('/')[0]
    srcinfo = native_chroot.run_cmd(
    srcinfo = run_cmd(
        MAKEPKG_CMD + ['--printsrcinfo'],
        cwd=os.path.join(CHROOT_PATHS['pkgbuilds'], base_package.path),
        cwd=pkgdir,
        stdout=subprocess.PIPE,
    )
    assert (isinstance(srcinfo, subprocess.CompletedProcess))
    lines = srcinfo.stdout.decode('utf-8').split('\n')

    current = base_package
    current: Pkgbuild = base_package
    multi_pkgs = False
    for line_raw in lines:
        line = line_raw.strip()
@@ -81,26 +232,118 @@ def parse_pkgbuild(relative_pkg_dir: str, native_chroot: Chroot) -> list[Pkgbuil
            multi_pkgs = True
        elif line.startswith('pkgname'):
            if multi_pkgs:
                if current is not base_package:
                    base_package.subpackages.append(current)
                current = deepcopy(base_package)
                current.name = splits[1]
                current = SubPkgbuild(splits[1], base_package)
                assert isinstance(base_package.subpackages, list)
                base_package.subpackages.append(current)
            else:
                current.name = splits[1]
        elif line.startswith('pkgver'):
            current.pkgver = splits[1]
        elif line.startswith('pkgrel'):
            current.pkgrel = splits[1]
        elif line.startswith('arch'):
            current.arches.append(splits[1])
        elif line.startswith('provides'):
            current.provides.append(splits[1])
        elif line.startswith('replaces'):
            current.replaces.append(splits[1])
        elif line.startswith('depends') or line.startswith('makedepends') or line.startswith('checkdepends') or line.startswith('optdepends'):
            current.depends.append(splits[1].split('=')[0].split(': ')[0])
            current.depends = list(set(current.depends))

    results = base_package.subpackages or [base_package]
    results: list[Pkgbuild] = list(base_package.subpackages)
    if len(results) > 1:
        logging.debug(f" Split package detected: {base_package.name}: {results}")
        base_package.update_version()
    else:
        results = [base_package]

    for pkg in results:
        pkg.version = f'{pkg.pkgver}-{pkg.pkgrel}'
        if not (pkg.pkgver == base_package.pkgver and pkg.pkgrel == base_package.pkgrel):
            raise Exception('subpackage malformed! pkgver differs!')

        assert isinstance(pkg, Pkgbuild)
        pkg.depends = list(set(pkg.depends))  # deduplicate dependencies
        pkg.update_version()
        if not (pkg.version == base_package.version):
            raise Exception(f'Subpackage malformed! Versions differ! base: {base_package}, subpackage: {pkg}')
    return results
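
The parser above leans entirely on makepkg --printsrcinfo: .SRCINFO is a flat listing of `key = value` lines in which the pkgbase block is followed by one pkgname line per (sub)package, and every pkgname line in a multi-package build spawns a SubPkgbuild. A trimmed, hypothetical example of that input:

    pkgbase = hello
        pkgver = 1.0
        pkgrel = 1
        arch = aarch64
        depends = glibc

    pkgname = hello

    pkgname = hello-docs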

_pkgbuilds_cache = dict[str, Pkgbuild]()
_pkgbuilds_paths = dict[str, list[Pkgbuild]]()
_pkgbuilds_scanned: bool = False


def get_pkgbuild_by_path(relative_path: str, lazy: bool = True, _config: Optional[ConfigStateHolder] = None) -> list[Pkgbuild]:
    global _pkgbuilds_cache, _pkgbuilds_paths
    if lazy and relative_path in _pkgbuilds_paths:
        return _pkgbuilds_paths[relative_path]
    parsed = parse_pkgbuild(relative_path, _config=_config)
    _pkgbuilds_paths[relative_path] = parsed
    for pkg in parsed:
        _pkgbuilds_cache[pkg.name] = pkg
    return parsed


def discover_pkgbuilds(parallel: bool = True, lazy: bool = True) -> dict[str, Pkgbuild]:
    global _pkgbuilds_cache, _pkgbuilds_scanned
    if lazy and _pkgbuilds_scanned:
        logging.debug("Reusing cached pkgbuilds repo")
        return _pkgbuilds_cache.copy()
    pkgbuilds_dir = config.get_path('pkgbuilds')
    packages: dict[str, Pkgbuild] = {}
    paths = []
    init_pkgbuilds(interactive=False)
    for repo in REPOSITORIES:
        for dir in os.listdir(os.path.join(pkgbuilds_dir, repo)):
            paths.append(os.path.join(repo, dir))

    logging.info("Parsing PKGBUILDs")

    results = []
    if parallel:
        paths_filtered = paths
        if lazy:
            # filter out cached packages as the caches don't cross process boundaries
            paths_filtered = []
            for p in paths:
                if p in _pkgbuilds_paths:
                    # use cache
                    results += _pkgbuilds_paths[p]
                else:
                    paths_filtered += [p]
        chunks = (Parallel(n_jobs=multiprocessing.cpu_count() * 4)(
            delayed(get_pkgbuild_by_path)(path, lazy=lazy, _config=config) for path in paths_filtered))
    else:
        chunks = (get_pkgbuild_by_path(path, lazy=lazy) for path in paths)

    _pkgbuilds_paths.clear()
    # one list of packages per path
    for pkglist in chunks:
        _pkgbuilds_paths[pkglist[0].path] = pkglist
        results += pkglist

    logging.debug('Building package dictionary!')
    for package in results:
        for name in [package.name] + package.replaces:
            if name in packages:
                logging.warning(f'Overriding {packages[package.name]} with {package}')
            packages[name] = package

    # This filters the deps to only include the ones that are provided in this repo
    for package in packages.values():
        package.local_depends = package.depends.copy()
        for dep in package.depends.copy():
            found = dep in packages
            for pkg in packages.values():
                if found:
                    break
                if dep in pkg.names():
                    logging.debug(f'Found {pkg.name} that provides {dep}')
                    found = True
                    break
            if not found:
                logging.debug(f'Removing {dep} from dependencies')
                package.local_depends.remove(dep)

    _pkgbuilds_cache.clear()
    _pkgbuilds_cache.update(packages)
    _pkgbuilds_scanned = True
    return packages

100	packages/test_device.py	Normal file
@@ -0,0 +1,100 @@
import pytest

import os

from copy import copy

from config import ConfigStateHolder, config
from .pkgbuild import init_pkgbuilds, discover_pkgbuilds, Pkgbuild, parse_pkgbuild
from .device import Device, DEVICE_DEPRECATIONS, get_device, get_devices, parse_device_pkg, check_devicepkg_name


@pytest.fixture(scope='session')
def initialise_pkgbuilds_dir() -> ConfigStateHolder:
    config.try_load_file()
    init_pkgbuilds(interactive=False)
    return config


@pytest.fixture()
def pkgbuilds_dir(initialise_pkgbuilds_dir: ConfigStateHolder) -> str:
    global config
    config = initialise_pkgbuilds_dir
    return config.get_path('pkgbuilds')


@pytest.fixture(scope='session')
def pkgbuilds_repo_cached(initialise_pkgbuilds_dir) -> dict[str, Pkgbuild]:
    return discover_pkgbuilds()


@pytest.fixture()
def pkgbuilds_repo(pkgbuilds_dir, pkgbuilds_repo_cached):
    # use pkgbuilds_dir to ensure the global config gets overridden; can't be done from session-scoped fixtures
    return pkgbuilds_repo_cached


ONEPLUS_ENCHILADA = 'sdm845-oneplus-enchilada'
ONEPLUS_ENCHILADA_PKG = f'device-{ONEPLUS_ENCHILADA}'


@pytest.fixture(scope='session')
def enchilada_pkgbuild(initialise_pkgbuilds_dir: ConfigStateHolder):
    config = initialise_pkgbuilds_dir
    config.try_load_file()
    return parse_pkgbuild(os.path.join('device', ONEPLUS_ENCHILADA_PKG), _config=config)[0]


def validate_oneplus_enchilada(d: Device):
    assert d
    assert d.arch == 'aarch64'
    assert d.package and d.package.name == ONEPLUS_ENCHILADA_PKG


def test_fixture_initialise_pkgbuilds_dir(initialise_pkgbuilds_dir: ConfigStateHolder):
    assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), 'device'))


def test_fixture_pkgbuilds_dir(pkgbuilds_dir):
    assert os.path.exists(os.path.join(pkgbuilds_dir, 'device'))


def test_get_device():
    name = ONEPLUS_ENCHILADA
    d = get_device(name)
    validate_oneplus_enchilada(d)


def test_get_device_deprecated():
    name = 'oneplus-enchilada'
    assert name in DEVICE_DEPRECATIONS
    d = get_device(name)
    # currently redirects to the correct package; change this test once the redirect becomes an exception
    validate_oneplus_enchilada(d)


def test_parse_device_pkg_enchilada(enchilada_pkgbuild):
    validate_oneplus_enchilada(parse_device_pkg(enchilada_pkgbuild))


def test_parse_device_pkg_malformed_arch(enchilada_pkgbuild):
    enchilada_pkgbuild = copy(enchilada_pkgbuild)
    enchilada_pkgbuild.arches.append('x86_64')
    with pytest.raises(Exception):
        parse_device_pkg(enchilada_pkgbuild)


def test_discover_packages_and_warm_cache_sorry_takes_long(pkgbuilds_repo):
    # mostly used to warm up the cache in a user-visible way
    assert pkgbuilds_repo
    assert ONEPLUS_ENCHILADA_PKG in pkgbuilds_repo


def test_get_devices(pkgbuilds_repo: dict[str, Pkgbuild]):
    d = get_devices(pkgbuilds_repo)
    assert d
    assert ONEPLUS_ENCHILADA in d
    for p in d.values():
        check_devicepkg_name(p.package.name)
    assert 'sdm845-oneplus-common' not in d
    validate_oneplus_enchilada(d[ONEPLUS_ENCHILADA])
4	pytest.sh	Executable file
@@ -0,0 +1,4 @@
#!/bin/bash

sudo -v
python -m pytest --junit-xml=pytest-report.xml -v "$@" ./*/test_*.py
@@ -4,3 +4,4 @@ joblib==1.0.1
toml
typing_extensions
coloredlogs
munch

43	ssh.py
@@ -2,11 +2,12 @@ from typing import Optional
import logging
import os
import pathlib
import subprocess
import click

from config import config
from constants import SSH_COMMON_OPTIONS, SSH_DEFAULT_HOST, SSH_DEFAULT_PORT
from exec.cmd import run_cmd
from wrapper import check_programs_wrap


@click.command(name='ssh')
@@ -16,10 +17,15 @@ from constants import SSH_COMMON_OPTIONS, SSH_DEFAULT_HOST, SSH_DEFAULT_PORT
@click.option('--port', '-p', help='the SSH port', type=int, default=SSH_DEFAULT_PORT)
def cmd_ssh(cmd: list[str], user: str, host: str, port: int):
    """Establish SSH connection to device"""
    run_ssh_command(list(cmd), user=user, host=host, port=port)
    run_ssh_command(list(cmd), user=user, host=host, port=port, alloc_tty=True)


def run_ssh_command(cmd: list[str] = [], user: Optional[str] = None, host: str = SSH_DEFAULT_HOST, port: int = SSH_DEFAULT_PORT):
def run_ssh_command(cmd: list[str] = [],
                    user: Optional[str] = None,
                    host: str = SSH_DEFAULT_HOST,
                    port: int = SSH_DEFAULT_PORT,
                    alloc_tty: bool = True):
    check_programs_wrap(['ssh'])
    if not user:
        user = config.get_profile()['username']
    keys = find_ssh_keys()
@@ -28,32 +34,42 @@ def run_ssh_command(cmd: list[str] = [], user: Optional[str] = None, host: str =
        extra_args += ['-i', keys[0]]
    if config.runtime['verbose']:
        extra_args += ['-v']
    logging.info(f'Opening SSH connection to {host}')
    return subprocess.run([
    if alloc_tty:
        extra_args += ['-t']
    hoststr = f'{(user + "@") if user else ""}{host}'
    logging.info(f'Opening SSH connection to {hoststr} ({port})')
    logging.debug(f"ssh: trying to run {cmd} on {hoststr}")
    full_cmd = [
        'ssh',
    ] + extra_args + SSH_COMMON_OPTIONS + [
        '-p',
        str(port),
        f'{user}@{host}',
        hoststr,
        '--',
    ] + cmd)
    ] + cmd
    logging.debug(f"running cmd: {full_cmd}")
    return run_cmd(full_cmd)
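
run_ssh_command assembles a single argv list: key and tty flags, the shared options, the port, the user@host string, then `--` and the remote command. Hypothetical usage (host and port are example values):

    # run a command on the device as the configured profile user, without a tty
    result = run_ssh_command(['uname', '-a'], host='10.0.0.1', port=22, alloc_tty=False)
    result.check_returncode()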

def scp_put_files(src: list[str], dst: str, user: str = None, host: str = SSH_DEFAULT_HOST, port: int = SSH_DEFAULT_PORT):
    check_programs_wrap(['scp'])
    if not user:
        user = config.get_profile()['username']
    keys = find_ssh_keys()
    key_args = []
    if len(keys) > 0:
        key_args = ['-i', keys[0]]
    return subprocess.run([
    cmd = [
        'scp',
    ] + key_args + SSH_COMMON_OPTIONS + [
        '-P',
        str(port),
    ] + src + [
        f'{user}@{host}:{dst}',
    ])
    ]
    logging.info(f"Copying files to {user}@{host}:{dst}:\n{src}")
    logging.debug(f"running cmd: {cmd}")
    return run_cmd(cmd)


def find_ssh_keys():
@@ -68,6 +84,7 @@ def find_ssh_keys():


def copy_ssh_keys(root_dir: str, user: str):
    check_programs_wrap(['ssh-keygen'])
    authorized_keys_file = os.path.join(
        root_dir,
        'home',
@@ -84,7 +101,7 @@ def copy_ssh_keys(root_dir: str, user: str):
    create = click.confirm("Do you want me to generate an ssh key for you?", True)
    if not create:
        return
    result = subprocess.run([
    result = run_cmd([
        'ssh-keygen',
        '-f',
        os.path.join(pathlib.Path.home(), '.ssh', 'id_ed25519_kupfer'),
@@ -95,18 +112,18 @@ def copy_ssh_keys(root_dir: str, user: str):
        '-N',
        '',
    ])
    if result.returncode != 0:
    if result.returncode != 0:  # type: ignore
        logging.fatal("Failed to generate ssh key")
    keys = find_ssh_keys()

    ssh_dir = os.path.join(root_dir, 'home', user, '.ssh')
    if not os.path.exists(ssh_dir):
        os.makedirs(ssh_dir, exist_ok=True)
        os.makedirs(ssh_dir, exist_ok=True, mode=0o700)

    with open(authorized_keys_file, 'a') as authorized_keys:
        for key in keys:
            pub = f'{key}.pub'
            if not os.path.exists('pub'):
            if not os.path.exists(pub):
                logging.debug(f'Skipping key {key}: {pub} not found')
                continue
            with open(pub, 'r') as file:

@@ -1,2 +1,2 @@
#!/bin/bash
git ls-files \*.py | xargs mypy --pretty --install-types --ignore-missing-imports "$@"
git ls-files \*.py | sort -u | xargs mypy --pretty --show-error-codes --install-types --ignore-missing-imports "$@"

50	utils.py
@@ -1,21 +1,31 @@
import atexit
import grp
import logging
import pwd
import subprocess
from shutil import which
from typing import Optional, Union, Sequence

from exec.cmd import run_cmd, run_root_cmd


def programs_available(programs: Union[str, Sequence[str]]) -> bool:
_programs_available = dict[str, bool]()


def programs_available(programs: Union[str, Sequence[str]], lazy: bool = True) -> bool:
    global _programs_available
    if type(programs) is str:
        programs = [programs]
    for program in programs:
        if not which(program):
        if program not in _programs_available or not lazy:
            avail = bool(which(program))
            _programs_available[program] = avail
        if not _programs_available[program]:
            return False
    return True
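
programs_available now memoizes which() lookups per program name; passing lazy=False forces a re-probe, which matters once wrapping has installed a previously missing tool. For illustration (the program names are examples):

    programs_available('git')               # probes $PATH and caches the result
    programs_available('git')               # answered from _programs_available, no new which() call
    programs_available(['git', 'mypy'], lazy=False)  # re-probes both, refreshing the cache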
def umount(dest: str, lazy=False):
|
||||
return subprocess.run(
|
||||
return run_root_cmd(
|
||||
[
|
||||
'umount',
|
||||
'-c' + ('l' if lazy else ''),
|
||||
@@ -33,7 +43,7 @@ def mount(src: str, dest: str, options: list[str] = ['bind'], fs_type: Optional[
|
||||
if fs_type:
|
||||
opts += ['-t', fs_type]
|
||||
|
||||
result = subprocess.run(
|
||||
result = run_root_cmd(
|
||||
['mount'] + opts + [
|
||||
src,
|
||||
dest,
|
||||
@@ -46,7 +56,7 @@ def mount(src: str, dest: str, options: list[str] = ['bind'], fs_type: Optional[
|
||||
|
||||
|
||||
def check_findmnt(path: str):
|
||||
result = subprocess.run(
|
||||
result = run_root_cmd(
|
||||
[
|
||||
'findmnt',
|
||||
'-n',
|
||||
@@ -59,8 +69,10 @@ def check_findmnt(path: str):
|
||||
return result.stdout.decode().strip()
|
||||
|
||||
|
||||
def git(cmd: list[str], dir='.', capture_output=False) -> subprocess.CompletedProcess:
|
||||
return subprocess.run(['git'] + cmd, cwd=dir, capture_output=capture_output)
|
||||
def git(cmd: list[str], dir='.', capture_output=False, user: Optional[str] = None) -> subprocess.CompletedProcess:
|
||||
result = run_cmd(['git'] + cmd, cwd=dir, capture_output=capture_output, switch_user=user)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
return result
|
||||
|
||||
|
||||
def log_or_exception(raise_exception: bool, msg: str, exc_class=Exception, log_level=logging.WARNING):
|
||||
@@ -68,3 +80,27 @@ def log_or_exception(raise_exception: bool, msg: str, exc_class=Exception, log_l
         raise exc_class(msg)
     else:
         logging.log(log_level, msg)
+
+
+def get_user_name(uid: Union[str, int]) -> str:
+    if isinstance(uid, int) or uid.isnumeric():
+        return pwd.getpwuid(int(uid)).pw_name
+    return uid
+
+
+def get_group_name(gid: Union[str, int]) -> str:
+    if isinstance(gid, int) or gid.isnumeric():
+        return grp.getgrgid(int(gid)).gr_name
+    return gid
+
+
+def get_uid(user: Union[int, str]) -> int:
+    if isinstance(user, int) or user.isnumeric():
+        return int(user)
+    return pwd.getpwnam(user).pw_uid
+
+
+def get_gid(group: Union[int, str]) -> int:
+    if isinstance(group, int) or group.isnumeric():
+        return int(group)
+    return grp.getgrnam(group).gr_gid
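Each of the four new helpers accepts either a name or a numeric id (as int or digit string) and normalizes in the requested direction, e.g.:

    from utils import get_group_name, get_uid, get_user_name

    get_uid('root')          # -> 0, resolved via pwd.getpwnam()
    get_uid('0')             # -> 0, numeric strings pass straight through
    get_user_name(0)         # -> 'root', resolved via pwd.getpwuid()
    get_group_name('wheel')  # -> 'wheel', names are returned unchanged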
@@ -1,7 +1,10 @@
 import click
 import logging
 
+from typing import Sequence, Union
+
 from config import config
+from constants import Arch
 from utils import programs_available
 from .docker import DockerWrapper
 from .wrapper import Wrapper
@@ -36,15 +39,22 @@ def enforce_wrap(no_wrapper=False):
         wrap()
 
 
-def check_programs_wrap(programs):
+def check_programs_wrap(programs: Union[str, Sequence[str]]):
     if not programs_available(programs):
+        logging.debug(f"Wrapping because one of {[programs] if isinstance(programs, str) else programs} isn't available.")
         enforce_wrap()
 
 
+def wrap_if_foreign_arch(arch: Arch):
+    if arch != config.runtime.arch:
+        enforce_wrap()
+
+
 nowrapper_option = click.option(
-    '--no-wrapper',
-    'no_wrapper',
+    '-w/-W',
+    '--force-wrapper/--no-wrapper',
+    'wrapper_override',
     is_flag=True,
-    default=False,
-    help='Disable the docker wrapper. Defaults to autodetection.',
+    default=None,
+    help='Force or disable the docker wrapper. Defaults to autodetection.',
 )
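With default=None the rewritten option becomes tri-state: -w/--force-wrapper yields True, -W/--no-wrapper yields False, and omitting the flag leaves None for autodetection. A hedged sketch of a consumer (the command and its body are hypothetical; only nowrapper_option and enforce_wrap come from this diff, and the import path is assumed):

    import click

    from wrapper import enforce_wrap, nowrapper_option

    @click.command()
    @nowrapper_option
    def build(wrapper_override):
        if wrapper_override is None:
            pass                # flag omitted: autodetect
        elif wrapper_override:
            enforce_wrap()      # -w / --force-wrapper
        else:
            pass                # -W / --no-wrapper: never wrap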
@@ -6,6 +6,8 @@ import sys
 
 from config import config
 from constants import CHROOT_PATHS
+from exec.file import makedir
+
 from .wrapper import BaseWrapper
 
 DOCKER_PATHS = CHROOT_PATHS.copy()
@@ -25,66 +27,70 @@ class DockerWrapper(BaseWrapper):
         script_path = config.runtime['script_source_dir']
         with open(os.path.join(script_path, 'version.txt')) as version_file:
             version = version_file.read().replace('\n', '')
-        tag = f'registry.gitlab.com/kupfer/kupferbootstrap:{version}'
-        if version == 'dev':
-            logging.info(f'Building docker image "{tag}"')
-            cmd = [
-                'docker',
-                'build',
-                '.',
-                '-t',
-                tag,
-            ] + (['-q'] if not config.runtime['verbose'] else [])
-            logging.debug('Running docker cmd: ' + ' '.join(cmd))
-            result = subprocess.run(cmd, cwd=script_path, capture_output=True)
-            if result.returncode != 0:
-                logging.fatal('Failed to build docker image:\n' + result.stderr.decode())
-                exit(1)
-        else:
-            # Check if the image for the version already exists
-            result = subprocess.run(
-                [
-                    'docker',
-                    'images',
-                    '-q',
-                    tag,
-                ],
-                capture_output=True,
-            )
-            if result.stdout == b'':
-                logging.info(f'Pulling kupferbootstrap docker image version \'{version}\'')
-                subprocess.run([
-                    'docker',
-                    'pull',
-                    tag,
-                ])
-        container_name = f'kupferbootstrap-{self.uuid}'
-
-        wrapped_config = self.generate_wrapper_config()
-
-        ssh_dir = os.path.join(pathlib.Path.home(), '.ssh')
-        if not os.path.exists(ssh_dir):
-            os.makedirs(ssh_dir)
-        volumes = self.get_bind_mounts_default(wrapped_config)
-        volumes |= dict({config.get_path(vol_name): vol_dest for vol_name, vol_dest in DOCKER_PATHS.items()})
-        docker_cmd = [
-            'docker',
-            'run',
-            '--name',
-            container_name,
-            '--rm',
-            '--interactive',
-            '--tty',
-            '--privileged',
-        ] + docker_volumes_args(volumes) + [tag]
-
-        kupfer_cmd = ['kupferbootstrap', '--config', '/root/.config/kupfer/kupferbootstrap.toml'] + self.filter_args_wrapper(sys.argv[1:])
-
-        cmd = docker_cmd + kupfer_cmd
-        logging.debug('Wrapping in docker:' + repr(cmd))
-        result = subprocess.run(cmd)
-
-        exit(result.returncode)
+        tag = f'registry.gitlab.com/kupfer/kupferbootstrap:{version}'
+        if version == 'dev':
+            logging.info(f'Building docker image "{tag}"')
+            cmd = [
+                'docker',
+                'build',
+                '.',
+                '-t',
+                tag,
+            ] + (['-q'] if not config.runtime['verbose'] else [])
+            logging.debug('Running docker cmd: ' + ' '.join(cmd))
+            result = subprocess.run(cmd, cwd=script_path, capture_output=True)
+            if result.returncode != 0:
+                logging.fatal('Failed to build docker image:\n' + result.stderr.decode())
+                exit(1)
+        else:
+            # Check if the image for the version already exists
+            result = subprocess.run(
+                [
+                    'docker',
+                    'images',
+                    '-q',
+                    tag,
+                ],
+                capture_output=True,
+            )
+            if result.stdout == b'':
+                logging.info(f'Pulling kupferbootstrap docker image version \'{version}\'')
+                subprocess.run([
+                    'docker',
+                    'pull',
+                    tag,
+                ])
+        container_name = f'kupferbootstrap-{self.uuid}'
+
+        wrapped_config = self.generate_wrapper_config()
+
+        ssh_dir = os.path.join(pathlib.Path.home(), '.ssh')
+        if not os.path.exists(ssh_dir):
+            os.makedirs(ssh_dir, mode=0o700)
+
+        volumes = self.get_bind_mounts_default(wrapped_config)
+        for vol_name, vol_dest in DOCKER_PATHS.items():
+            vol_src = config.get_path(vol_name)
+            makedir(vol_src)
+            volumes[vol_src] = vol_dest
+        docker_cmd = [
+            'docker',
+            'run',
+            '--name',
+            container_name,
+            '--rm',
+            '--interactive',
+            '--tty',
+            '--privileged',
+        ] + docker_volumes_args(volumes) + [tag]
+
+        kupfer_cmd = ['kupferbootstrap', '--config', '/root/.config/kupfer/kupferbootstrap.toml'] + self.filter_args_wrapper(sys.argv[1:])
+
+        cmd = docker_cmd + kupfer_cmd
+        logging.debug('Wrapping in docker:' + repr(cmd))
+        result = subprocess.run(cmd)
+
+        exit(result.returncode)
 
     def stop(self):
         subprocess.run(
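docker_volumes_args() is referenced but not defined in this hunk; presumably it expands the {source: destination} dict into repeated -v arguments for docker run. A hedged sketch of that assumed shape (the example path is made up):

    def docker_volumes_args(volumes: dict[str, str]) -> list[str]:
        # Assumption: plain bind mounts; the real helper may add mount options.
        args: list[str] = []
        for src, dest in volumes.items():
            args += ['-v', f'{src}:{dest}']
        return args

    docker_volumes_args({'/home/user/.kupfer/packages': '/packages'})
    # -> ['-v', '/home/user/.kupfer/packages:/packages']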
@@ -5,7 +5,8 @@ import pathlib
 
 from typing import Protocol
 
-from config import config, dump_file as dump_config_file
+from config import config
+from config.state import dump_file as dump_config_file
 from constants import CHROOT_PATHS