Compare commits: dev ... prawn/armv (9 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 645b3b1d2b | |
| | efd1de9b91 | |
| | 4c77a16bba | |
| | 57d5ed474f | |
| | 114755888e | |
| | b154f835e6 | |
| | bc31f9822a | |
| | 08fc10bf11 | |
| | 6e8fd9f622 | |
.gitignore (vendored, 3 changes)
@@ -1,5 +1,2 @@
*.kate-swp
venv/
__pycache__/
.coverage*
*.xml
@@ -7,7 +7,7 @@ format:
stage: check
image: python
before_script:
- pip install yapf autoflake --break-system-packages
- pip install yapf autoflake
script:
- ./format.sh --check

@@ -15,7 +15,7 @@ typecheck:
stage: check
image: python
before_script:
- pip install mypy --break-system-packages
- pip install mypy
script:
- ./typecheck.sh --non-interactive --junit-xml mypy-report.xml
artifacts:

@@ -26,13 +26,13 @@ pytest:
stage: check
image: archlinux
before_script:
- pacman -Sy --noconfirm --needed archlinux-keyring && pacman -Su --noconfirm python python-pip sudo git base-devel arch-install-scripts rsync
- pip install -r test_requirements.txt -r requirements.txt --break-system-packages
- pacman -Sy --noconfirm --needed archlinux-keyring && pacman -Su --noconfirm python python-pip sudo git base-devel
- pip install pytest pytest-cov -r requirements.txt
- 'echo "kupfer ALL = (ALL) NOPASSWD: ALL" > /etc/sudoers.d/kupfer_all'
- useradd -m kupfer
- chmod 777 .
script:
- script -e -c 'su kupfer -s /bin/bash -c "INTEGRATION_TESTS_USE_GLOBAL_CONFIG=TRUE KUPFERBOOTSTRAP_WRAPPED=DOCKER ./pytest.sh --junit-xml=pytest-report.xml --cov-report=xml:coverage.xml integration_tests.py"'
- script -e -c 'su kupfer -s /bin/bash -c "./pytest.sh --cov=. --cov-branch --cov-report=term --cov-report=xml:coverage.xml"'
coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
artifacts:
reports:

@@ -40,97 +40,39 @@ pytest:
coverage_report:
coverage_format: cobertura
path: coverage.xml

build_docker:
stage: build
image: docker:latest
services:
- name: docker:dind
command: ["--mtu=1100"] # very low, safe value -.-
variables:
DOCKER_TLS_CERTDIR: ""
script:
- 'docker build --pull -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_SLUG}" .'
only:
- branches
except:
- main
- dev

push_docker:
extends: build_docker
services: ['docker:dind']
before_script:
- echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin "$CI_REGISTRY"
script:
- !reference [build_docker, script]
- docker build --pull -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_SLUG}" .
- if [[ "$CI_COMMIT_REF_NAME" == "main" ]]; then docker image tag "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" "${CI_REGISTRY_IMAGE}:latest"; fi
- docker push -a "${CI_REGISTRY_IMAGE}"
only:
- main
- dev
except:

.docs:
image: "registry.gitlab.com/kupfer/kupferbootstrap:dev"
variables:
DOCS_SPHINXARGS: '-W'
DOCS_MAKE_TARGET: "html"
DOCS_MAKE_THREADS: 6
before_script: &docs_before_script
- pip install -r requirements.txt -r docs/requirements.txt --break-system-packages
script: &docs_script
- make -C docs -j$DOCS_MAKE_THREADS SPHINXARGS="$DOCS_SPHINXARGS" $DOCS_MAKE_TARGET
- mv "docs/$DOCS_MAKE_TARGET" public
- if [[ -e docs/archived ]]; then cp -r docs/archived public/ ; fi
- rm -vf docs/archived/{main,dev,"$CI_COMMIT_REF_NAME"}.tar.gz # we want to cache only old tags as they won't change
after_script:
image: "${CI_REGISTRY_IMAGE}:dev"
before_script:
- pacman -Sy --noconfirm python-sphinx-{click,furo}
script:
- (cd docs && make)
- mv docs/html public
artifacts:
paths:
- public
cache:
key: docs
paths:
- docs/archived/*.tar.gz

build_docs:
stage: build
extends: .docs
except:
refs:
- main
- dev
- docs
variables:
- '$CI_COMMIT_MESSAGE =~ /ci-kbs-docs-build-full/'
- '$KBS_DOCS_FULL_BUILD == "1"'

build_docs_all:
stage: build
extends: pages
resource_group: $CI_COMMIT_SHA
script:
- (cd docs && make SPHINXARGS="$DOCS_SPHINXARGS -D 'version=$CI_COMMIT_REF_NAME'" && mkdir -p versions && cp -r html versions/$CI_COMMIT_REF_SLUG)
- *docs_script
only:
refs:
- branches
variables:
- '$CI_COMMIT_MESSAGE =~ /ci-kbs-docs-build-full/'
- '$KBS_DOCS_FULL_BUILD == "1"'
- '$CI_COMMIT_REF_NAME == "docs"'
except:
- main
- dev

pages:
stage: deploy
extends: .docs
only:
- main
- dev
variables:
DOCS_MAKE_TARGET: versions
resource_group: docs
before_script:
- git remote update
- *docs_before_script
Dockerfile (17 changes)
@@ -2,15 +2,21 @@ FROM archlinux:base-devel

RUN pacman-key --init && \
pacman -Sy --noconfirm archlinux-keyring && \
pacman -Su --noconfirm --needed \
pacman -Su --noconfirm \
python python-pip \
arch-install-scripts rsync \
aarch64-linux-gnu-gcc aarch64-linux-gnu-binutils aarch64-linux-gnu-glibc aarch64-linux-gnu-linux-api-headers \
git sudo \
git \
android-tools openssh inetutils \
parted

RUN sed -i "s/EUID == 0/EUID == -1/g" "$(which makepkg)"
RUN sed -i "s/EUID == 0/EUID == -1/g" $(which makepkg)

RUN cd /tmp && \
git clone https://aur.archlinux.org/aarch64-linux-gnu-pkg-config.git && \
cd aarch64-linux-gnu-pkg-config && \
makepkg -s --skippgpcheck && \
pacman -U --noconfirm *.pkg*

RUN yes | pacman -Scc

@@ -21,13 +27,10 @@ ENV PATH=/app/bin:/app/local/bin:$PATH
WORKDIR /app

COPY requirements.txt .
# TODO: pip packaging so we don't need --break-system-packages
RUN pip install -r requirements.txt --break-system-packages
RUN pip install -r requirements.txt

COPY . .

RUN python -c "from distro import distro; distro.get_kupfer_local(arch=None,in_chroot=False).repos_config_snippet()" | tee -a /etc/pacman.conf
RUN useradd -m -g users kupfer
RUN echo "kupfer ALL=(ALL) NOPASSWD: ALL" | tee /etc/sudoers.d/kupfer

WORKDIR /
README.md (23 changes)
@@ -1,29 +1,14 @@
# kupferbootstrap

Kupfer Linux bootstrapping tool - drives pacstrap, makepkg, chroot, mkfs and fastboot, just to name a few.


## Documentation

Detailed docs for the main branch are available online at https://kupfer.gitlab.io/kupferbootstrap/

You can also build and view the docs locally:
```sh
cd docs/ && \
make && \
make serve
```

This will run a webserver on localhost:9999. Access it like `firefox http://localhost:9999/`

Kupfer Linux bootstrapping tool - drives pacstrap, makepkg, mkfs and fastboot, just to name a few.

## Installation
Install Docker, Python 3 with the libraries from `requirements.txt` and put `bin/` into your `PATH`.
Then use `kupferbootstrap`.


## Quickstart
1. Initialize config with defaults, configure your device and flavour: `kupferbootstrap config init`
## Usage
1. Initialize config with defaults: `kupferbootstrap config init -N`
1. Configure your device profile: `kupferbootstrap config profile init`
1. Build an image and packages along the way: `kupferbootstrap image build`
binfmt.py (new file, 85 lines)
@@ -0,0 +1,85 @@
# modifed from pmbootstrap's binfmt.py, Copyright 2018 Oliver Smith, GPL-licensed

import os
import logging

from constants import Arch, QEMU_ARCHES
from exec.cmd import run_root_cmd
from utils import mount


def binfmt_info():
# Parse the info file
full = {}
info = "/usr/lib/binfmt.d/qemu-static.conf"
logging.debug("parsing: " + info)
with open(info, "r") as handle:
for line in handle:
if line.startswith('#') or ":" not in line:
continue
splitted = line.split(":")
result = {
# _ = splitted[0] # empty
'name': splitted[1],
'type': splitted[2],
'offset': splitted[3],
'magic': splitted[4],
'mask': splitted[5],
'interpreter': splitted[6],
'flags': splitted[7],
'line': line,
}
if not result['name'].startswith('qemu-'):
logging.fatal(f'Unknown binfmt handler "{result["name"]}"')
logging.debug(f'binfmt line: {line}')
continue
arch = ''.join(result['name'].split('-')[1:])
full[arch] = result

return full


def is_registered(arch: Arch) -> bool:
qemu_arch = QEMU_ARCHES[arch]
return os.path.exists("/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch)


def register(arch: Arch):
if arch not in QEMU_ARCHES:
raise Exception(f'binfmt.register(): unknown arch {arch} (not in QEMU_ARCHES)')
qemu_arch = QEMU_ARCHES[arch]
if is_registered(arch):
return

lines = binfmt_info()

# Build registration string
# https://en.wikipedia.org/wiki/Binfmt_misc
# :name:type:offset:magic:mask:interpreter:flags
info = lines[qemu_arch]
code = info['line']
binfmt = '/proc/sys/fs/binfmt_misc'
register = binfmt + '/register'
if not os.path.exists(register):
logging.info('mounting binfmt_misc')
result = mount('binfmt_misc', binfmt, options=[], fs_type='binfmt_misc')
if result.returncode != 0:
raise Exception(f'Failed mounting binfmt_misc to {binfmt}')

# Register in binfmt_misc
logging.info(f"Registering qemu binfmt ({arch})")
run_root_cmd(["sh", "-c", f'echo "{code}" > {register} 2>/dev/null'])
if not is_registered(arch):
logging.debug(f'binfmt line: {code}')
raise Exception(f'Failed to register qemu-user for {arch} with binfmt_misc, {binfmt}/{info["name"]} not found')


def unregister(arch):
if arch not in QEMU_ARCHES:
raise Exception(f'binfmt.unregister(): unknown arch {arch} (not in QEMU_ARCHES)')
qemu_arch = QEMU_ARCHES[arch]
binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch
if not os.path.exists(binfmt_file):
return
logging.info(f"Unregistering qemu binfmt ({arch})")
run_root_cmd(["sh", "-c", f"echo -1 > {binfmt_file}"])
binfmt/binfmt.py (deleted, 125 lines)
@@ -1,125 +0,0 @@
# modifed from pmbootstrap's binfmt.py, Copyright 2018 Oliver Smith, GPL-licensed

import os
import logging

from typing import Optional

from chroot.abstract import Chroot
from constants import Arch, QEMU_ARCHES
from exec.cmd import run_root_cmd, CompletedProcess
from utils import mount


def binfmt_info(chroot: Optional[Chroot] = None):
# Parse the info file
full = {}
info = "/usr/lib/binfmt.d/qemu-static.conf"
if chroot:
info = chroot.get_path(info)
logging.debug("parsing: " + info)
with open(info, "r") as handle:
for line in handle:
if line.startswith('#') or ":" not in line:
continue
splitted = line.split(":")
result = {
# _ = splitted[0] # empty
'name': splitted[1],
'type': splitted[2],
'offset': splitted[3],
'magic': splitted[4],
'mask': splitted[5],
'interpreter': splitted[6],
'flags': splitted[7],
'line': line,
}
if not result['name'].startswith('qemu-'):
logging.fatal(f'Unknown binfmt handler "{result["name"]}"')
logging.debug(f'binfmt line: {line}')
continue
arch = ''.join(result['name'].split('-')[1:])
full[arch] = result

return full


def is_arch_known(arch: Arch, raise_exception: bool = False, action: Optional[str] = None) -> bool:
if arch not in QEMU_ARCHES:
if raise_exception:
raise Exception(f'binfmt{f".{action}()" if action else ""}: unknown arch {arch} (not in QEMU_ARCHES)')
return False
return True


def binfmt_is_registered(arch: Arch, chroot: Optional[Chroot] = None) -> bool:
is_arch_known(arch, True, 'is_registered')
qemu_arch = QEMU_ARCHES[arch]
path = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch
binfmt_ensure_mounted(chroot)
if chroot:
path = chroot.get_path(path)
return os.path.exists(path)


def binfmt_ensure_mounted(chroot: Optional[Chroot] = None):
binfmt_path = '/proc/sys/fs/binfmt_misc'
register_path = binfmt_path + '/register'
if chroot:
register_path = chroot.get_path(register_path)
if not os.path.exists(register_path):
logging.info('mounting binfmt_misc')
result = (chroot.mount if chroot else mount)('binfmt_misc', binfmt_path, options=[], fs_type='binfmt_misc')  # type: ignore[operator]
if (isinstance(result, CompletedProcess) and result.returncode != 0) or not result:
raise Exception(f'Failed mounting binfmt_misc to {binfmt_path}')


def binfmt_register(arch: Arch, chroot: Optional[Chroot] = None):
binfmt_path = '/proc/sys/fs/binfmt_misc'
register_path = binfmt_path + '/register'
is_arch_known(arch, True, 'register')
qemu_arch = QEMU_ARCHES[arch]
if binfmt_is_registered(arch, chroot=chroot):
return

lines = binfmt_info(chroot=chroot)

_runcmd = run_root_cmd
if chroot:
_runcmd = chroot.run_cmd
chroot.activate()

binfmt_ensure_mounted(chroot)

# Build registration string
# https://en.wikipedia.org/wiki/Binfmt_misc
# :name:type:offset:magic:mask:interpreter:flags
info = lines[qemu_arch]
code = info['line']

if arch == os.uname().machine:
logging.fatal("Attempted to register qemu binfmt for host architecture, skipping!")
return

# Register in binfmt_misc
logging.info(f"Registering qemu binfmt ({arch})")
_runcmd(f'echo "{code}" > "{register_path}" 2>/dev/null')  # use path without chroot path prefix
if not binfmt_is_registered(arch, chroot=chroot):
logging.debug(f'binfmt line: {code}')
raise Exception(f'Failed to register qemu-user for {arch} with binfmt_misc, {binfmt_path}/{info["name"]} not found')


def binfmt_unregister(arch, chroot: Optional[Chroot] = None):
is_arch_known(arch, True, 'unregister')
qemu_arch = QEMU_ARCHES[arch]
binfmt_ensure_mounted(chroot)
binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch
if chroot:
binfmt_file = chroot.get_path(binfmt_file)
if not os.path.exists(binfmt_file):
logging.debug(f"qemu binfmt for {arch} not registered")
return
logging.info(f"Unregistering qemu binfmt ({arch})")
run_root_cmd(f"echo -1 > {binfmt_file}")
if binfmt_is_registered(arch, chroot=chroot):
raise Exception(f'Failed to UNregister qemu-user for {arch} with binfmt_misc, {chroot=}')
@@ -1,44 +0,0 @@
import click
import os

from typing import Optional

from constants import Arch, ARCHES

from .binfmt import binfmt_unregister, binfmt_is_registered

cmd_binfmt = click.Group('binfmt', help='Manage qemu binfmt for executing foreign architecture binaries')
arches_arg = click.argument('arches', type=click.Choice(ARCHES), nargs=-1, required=True)
arches_arg_optional = click.argument('arches', type=click.Choice(ARCHES), nargs=-1, required=False)


@cmd_binfmt.command('register', help='Register a binfmt handler with the kernel')
@arches_arg
def cmd_register(arches: list[Arch], disable_chroot: bool = False):
from packages.build import build_enable_qemu_binfmt
for arch in arches:
build_enable_qemu_binfmt(arch)


@cmd_binfmt.command('unregister', help='Unregister a binfmt handler from the kernel')
@arches_arg_optional
def cmd_unregister(arches: Optional[list[Arch]]):
for arch in arches or ARCHES:
binfmt_unregister(arch)


@cmd_binfmt.command('status', help='Get the status of a binfmt handler from the kernel')
@arches_arg_optional
def cmd_status(arches: Optional[list[Arch]]):
for arch in arches or ARCHES:
native = arch == os.uname().machine
active = binfmt_is_registered(arch)
if native and not active:
# boooring
continue
verb = click.style(
"is" if active else "is NOT",
fg='green' if (active ^ native) else 'red',
bold=True,
)
click.echo(f'Binfmt for {arch} {verb} set up! {"(host architecture!)" if native else ""}')
boot.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import os
import urllib.request
import click

from config import config
from constants import BOOT_STRATEGIES, FLASH_PARTS, FASTBOOT, JUMPDRIVE, JUMPDRIVE_VERSION
from exec.file import makedir
from fastboot import fastboot_boot, fastboot_erase_dtbo
from image import get_device_and_flavour, losetup_rootfs_image, get_image_path, dump_aboot, dump_lk2nd
from wrapper import enforce_wrap

LK2ND = FLASH_PARTS['LK2ND']
ABOOT = FLASH_PARTS['ABOOT']

TYPES = [LK2ND, JUMPDRIVE, ABOOT]


@click.command(name='boot')
@click.argument('type', required=False, default=ABOOT, type=click.Choice(TYPES))
def cmd_boot(type):
"""Boot JumpDrive or the Kupfer aboot image. Erases Android DTBO in the process."""
enforce_wrap()
device, flavour = get_device_and_flavour()
# TODO: parse arch and sector size
sector_size = 4096
image_path = get_image_path(device, flavour)
strategy = BOOT_STRATEGIES[device]

if strategy == FASTBOOT:
if type == JUMPDRIVE:
file = f'boot-{device}.img'
path = os.path.join(config.get_path('jumpdrive'), file)
makedir(os.path.dirname(path))
if not os.path.exists(path):
urllib.request.urlretrieve(f'https://github.com/dreemurrs-embedded/Jumpdrive/releases/download/{JUMPDRIVE_VERSION}/{file}', path)
else:
loop_device = losetup_rootfs_image(image_path, sector_size)
if type == LK2ND:
path = dump_lk2nd(loop_device + 'p1')
elif type == ABOOT:
path = dump_aboot(loop_device + 'p1')
else:
raise Exception(f'Unknown boot image type {type}')
fastboot_erase_dtbo()
fastboot_boot(path)
cache.py (new file, 41 lines)
@@ -0,0 +1,41 @@
import click
import os
import logging

from config import config
from exec.file import remove_file
from wrapper import enforce_wrap

PATHS = ['chroots', 'pacman', 'jumpdrive', 'packages', 'images']


@click.group(name='cache')
def cmd_cache():
"""Clean caches and chroots"""


@cmd_cache.command(name='clean')
@click.option('--force', default=False)
@click.argument('paths', nargs=-1, required=False)
def cmd_clean(paths: list[str], force=False):
if unknown_paths := (set(paths) - set(PATHS + ['all'])):
raise Exception(f"Unknown paths: {' ,'.join(unknown_paths)}")
if 'all' in paths or (not paths and force):
paths = PATHS.copy()

enforce_wrap()

clear = {path: (path in paths) for path in PATHS}
query = not paths
if not query or force:
click.confirm(f'Really clear {", ".join(paths)}?', abort=True)
for path_name in PATHS:
if query:
clear[path_name] = click.confirm(f'Clear {path_name}?')
if clear[path_name]:
logging.info(f'Clearing {path_name}')
dir = config.get_path(path_name)
for file in os.listdir(dir):
path = os.path.join(dir, file)
logging.debug(f'Removing "{path_name}/{file}"')
remove_file(path, recursive=True)
cache/__init__.py (vendored, 0 changes)
cache/cli.py (vendored, deleted, 51 lines)
@@ -1,51 +0,0 @@
import click
import os
import logging

from config.state import config
from constants import CHROOT_PATHS
from exec.file import remove_file
from packages.cli import cmd_clean as cmd_clean_pkgbuilds
from wrapper import enforce_wrap

PATHS = list(CHROOT_PATHS.keys())


@click.group(name='cache')
def cmd_cache():
"""Clean various cache directories"""


@cmd_cache.command(name='clean')
@click.option('--force', is_flag=True, default=False, help="Don't ask for any confirmation")
@click.option('-n', '--noop', is_flag=True, default=False, help="Print what would be removed but dont execute")
@click.argument('paths', nargs=-1, type=click.Choice(['all'] + PATHS), required=False)
@click.pass_context
def cmd_clean(ctx: click.Context, paths: list[str], force: bool = False, noop: bool = False):
"""Clean various working directories"""
if unknown_paths := (set(paths) - set(PATHS + ['all'])):
raise Exception(f"Unknown paths: {' ,'.join(unknown_paths)}")
if 'all' in paths or (not paths and force):
paths = PATHS.copy()

enforce_wrap()

clear = {path: (path in paths) for path in PATHS}
query = not paths
if not query and not force:
click.confirm(f'Really clear {", ".join(paths)}?', abort=True)
for path_name in PATHS:
if query and not force:
clear[path_name] = click.confirm(f'{"(Noop) " if noop else ""}Clear {path_name}?')
if clear[path_name]:
logging.info(f'Clearing {path_name}')
if path_name == 'pkgbuilds':
ctx.invoke(cmd_clean_pkgbuilds, force=force, noop=noop)
continue
dir = config.get_path(path_name)
for file in os.listdir(dir):
path = os.path.join(dir, file)
log = logging.info if noop else logging.debug
log(f'{"Would remove" if noop else "Removing"} "{path_name}/{file}"')
if not noop:
remove_file(path, recursive=True)
@@ -0,0 +1,60 @@
import click
import logging
import os

from config import config
from wrapper import enforce_wrap

from .abstract import Chroot
from .base import get_base_chroot
from .build import get_build_chroot, BuildChroot
from .helpers import get_chroot_path

# export Chroot class
Chroot = Chroot


@click.command('chroot')
@click.argument('type', required=False, default='build')
@click.argument('arch', required=False, default=None)
def cmd_chroot(type: str = 'build', arch: str = None, enable_crossdirect=True):
"""Open a shell in a chroot"""
chroot_path = ''
if type not in ['base', 'build', 'rootfs']:
raise Exception('Unknown chroot type: ' + type)

enforce_wrap()
chroot: Chroot
if type == 'rootfs':
if arch:
name = 'rootfs_' + arch
else:
raise Exception('"rootfs" without args not yet implemented, sorry!')
# TODO: name = config.get_profile()[...]
chroot_path = get_chroot_path(name)
if not os.path.exists(chroot_path):
raise Exception(f"rootfs {name} doesn't exist")
else:
if not arch:
# TODO: arch = config.get_profile()[...]
arch = 'aarch64'
if type == 'base':
chroot = get_base_chroot(arch)
if not os.path.exists(chroot.get_path('/bin')):
chroot.initialize()
chroot.initialized = True
elif type == 'build':
build_chroot: BuildChroot = get_build_chroot(arch, activate=True)
chroot = build_chroot  # type safety
if not os.path.exists(build_chroot.get_path('/bin')):
build_chroot.initialize()
build_chroot.initialized = True
build_chroot.mount_pkgbuilds()
if config.file['build']['crossdirect'] and enable_crossdirect:
build_chroot.mount_crossdirect()
else:
raise Exception('Really weird bug')

chroot.activate()
logging.debug(f'Starting shell in {chroot.name}:')
chroot.run_cmd('bash', attach_tty=True)
@@ -2,17 +2,15 @@ import atexit
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from copy import deepcopy
|
||||
from shlex import quote as shell_quote
|
||||
from typing import ClassVar, Iterable, Protocol, Union, Optional, Mapping
|
||||
from typing import Protocol, Union, Optional, Mapping
|
||||
from uuid import uuid4
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import Arch, CHROOT_PATHS, GCC_HOSTSPECS
|
||||
from distro.distro import get_base_distro, get_kupfer_local, RepoInfo
|
||||
from exec.cmd import FileDescriptor, run_root_cmd, generate_env_cmd, flatten_shell_script, wrap_in_bash, generate_cmd_su
|
||||
from exec.cmd import run_root_cmd, generate_env_cmd, flatten_shell_script, wrap_in_bash
|
||||
from exec.file import makedir, root_makedir, root_write_file, write_file
|
||||
from generator import generate_makepkg_conf
|
||||
from utils import mount, umount, check_findmnt, log_or_exception
|
||||
@@ -36,9 +34,10 @@ class AbstractChroot(Protocol):
|
||||
name: str,
|
||||
arch: Arch,
|
||||
copy_base: bool,
|
||||
initialize: bool,
|
||||
extra_repos: Mapping[str, RepoInfo],
|
||||
base_packages: list[str],
|
||||
path_override: Optional[str] = None,
|
||||
path_override: str = None,
|
||||
):
|
||||
pass
|
||||
|
||||
@@ -60,8 +59,7 @@ class AbstractChroot(Protocol):
|
||||
capture_output: bool,
|
||||
cwd: str,
|
||||
fail_inactive: bool,
|
||||
stdout: Optional[FileDescriptor],
|
||||
stderr: Optional[FileDescriptor],
|
||||
stdout: Optional[int],
|
||||
):
|
||||
pass
|
||||
|
||||
@@ -80,9 +78,6 @@ class AbstractChroot(Protocol):
|
||||
|
||||
class Chroot(AbstractChroot):
|
||||
|
||||
_copy_base: ClassVar[bool] = False
|
||||
copy_base: bool
|
||||
|
||||
def __repr__(self):
|
||||
return f'Chroot({self.name})'
|
||||
|
||||
@@ -90,10 +85,11 @@ class Chroot(AbstractChroot):
|
||||
self,
|
||||
name: str,
|
||||
arch: Arch,
|
||||
copy_base: Optional[bool] = None,
|
||||
copy_base: bool = None,
|
||||
initialize: bool = False,
|
||||
extra_repos: Mapping[str, RepoInfo] = {},
|
||||
base_packages: list[str] = ['base', 'base-devel', 'git'],
|
||||
path_override: Optional[str] = None,
|
||||
path_override: str = None,
|
||||
):
|
||||
self.uuid = uuid4()
|
||||
if copy_base is None:
|
||||
@@ -105,9 +101,11 @@ class Chroot(AbstractChroot):
|
||||
self.name = name
|
||||
self.arch = arch
|
||||
self.path = path_override or os.path.join(config.get_path('chroots'), name)
|
||||
self.copy_base = copy_base if copy_base is not None else self._copy_base
|
||||
self.copy_base = copy_base
|
||||
self.extra_repos = deepcopy(extra_repos)
|
||||
self.base_packages = base_packages.copy()
|
||||
if initialize:
|
||||
self.initialize()
|
||||
if self.name.startswith(BASE_CHROOT_PREFIX) and set(get_kupfer_local(self.arch).repos).intersection(set(self.extra_repos)):
|
||||
raise Exception(f'Base chroot {self.name} had local repos specified: {self.extra_repos}')
|
||||
|
||||
@@ -131,7 +129,6 @@ class Chroot(AbstractChroot):
|
||||
|
||||
def get_path(self, *joins: str) -> str:
|
||||
if joins:
|
||||
# no need to check for len(joins) > 1 because [1:] will just return []
|
||||
joins = (joins[0].lstrip('/'),) + joins[1:]
|
||||
|
||||
return os.path.join(self.path, *joins)
|
||||
@@ -141,7 +138,7 @@ class Chroot(AbstractChroot):
|
||||
absolute_source: str,
|
||||
relative_destination: str,
|
||||
options=['bind'],
|
||||
fs_type: Optional[str] = None,
|
||||
fs_type: str = None,
|
||||
fail_if_mounted: bool = True,
|
||||
mkdir: bool = True,
|
||||
strict_cache_consistency: bool = False,
|
||||
@@ -182,7 +179,7 @@ class Chroot(AbstractChroot):
|
||||
self.active_mounts.remove(relative_path)
|
||||
return result
|
||||
|
||||
def umount_many(self, relative_paths: Iterable[str]):
|
||||
def umount_many(self, relative_paths: list[str]):
|
||||
# make sure paths start with '/'. Important: also copies the collection and casts to list, which will be sorted!
|
||||
mounts = [make_abs_path(path) for path in relative_paths]
|
||||
mounts.sort(reverse=True)
|
||||
@@ -225,16 +222,13 @@ class Chroot(AbstractChroot):
|
||||
capture_output: bool = False,
|
||||
cwd: Optional[str] = None,
|
||||
fail_inactive: bool = True,
|
||||
stdout: Optional[FileDescriptor] = None,
|
||||
stderr: Optional[FileDescriptor] = None,
|
||||
switch_user: Optional[str] = None,
|
||||
stdout: Optional[int] = None,
|
||||
) -> Union[int, subprocess.CompletedProcess]:
|
||||
if not self.active and fail_inactive:
|
||||
raise Exception(f'Chroot {self.name} is inactive, not running command! Hint: pass `fail_inactive=False`')
|
||||
if outer_env is None:
|
||||
outer_env = {}
|
||||
native = config.runtime.arch
|
||||
assert native
|
||||
native = config.runtime['arch']
|
||||
if self.arch != native and 'QEMU_LD_PREFIX' not in outer_env:
|
||||
outer_env = dict(outer_env) # copy dict for modification
|
||||
outer_env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][self.arch]}'}
|
||||
@@ -244,13 +238,9 @@ class Chroot(AbstractChroot):
|
||||
script = flatten_shell_script(script, shell_quote_items=False, wrap_in_shell_quote=False)
|
||||
if cwd:
|
||||
script = f"cd {shell_quote(cwd)} && ( {script} )"
|
||||
if switch_user:
|
||||
inner_cmd = generate_cmd_su(script, switch_user=switch_user, elevation_method='none', force_su=True)
|
||||
else:
|
||||
inner_cmd = wrap_in_bash(script, flatten_result=False)
|
||||
cmd = flatten_shell_script(['chroot', self.path] + env_cmd + inner_cmd, shell_quote_items=True)
|
||||
cmd = flatten_shell_script(['chroot', self.path] + env_cmd + wrap_in_bash(script, flatten_result=False), shell_quote_items=True)
|
||||
|
||||
return run_root_cmd(cmd, env=outer_env, attach_tty=attach_tty, capture_output=capture_output, stdout=stdout, stderr=stderr)
|
||||
return run_root_cmd(cmd, env=outer_env, attach_tty=attach_tty, capture_output=capture_output, stdout=stdout)
|
||||
|
||||
def mount_pkgbuilds(self, fail_if_mounted: bool = False) -> str:
|
||||
return self.mount(
|
||||
@@ -260,12 +250,12 @@ class Chroot(AbstractChroot):
|
||||
)
|
||||
|
||||
def mount_pacman_cache(self, fail_if_mounted: bool = False) -> str:
|
||||
shared_cache = os.path.join(config.get_path('pacman'), self.arch)
|
||||
rel_target = 'var/cache/pacman/pkg'
|
||||
makedir(shared_cache)
|
||||
arch_cache = os.path.join(config.get_path('pacman'), self.arch)
|
||||
rel_target = os.path.join(CHROOT_PATHS['pacman'].lstrip('/'), self.arch)
|
||||
makedir(arch_cache)
|
||||
root_makedir(self.get_path(rel_target))
|
||||
return self.mount(
|
||||
shared_cache,
|
||||
arch_cache,
|
||||
rel_target,
|
||||
fail_if_mounted=fail_if_mounted,
|
||||
)
|
||||
@@ -277,13 +267,6 @@ class Chroot(AbstractChroot):
|
||||
fail_if_mounted=fail_if_mounted,
|
||||
)
|
||||
|
||||
def mount_chroots(self, fail_if_mounted: bool = False) -> str:
|
||||
return self.mount(
|
||||
absolute_source=config.get_path('chroots'),
|
||||
relative_destination=CHROOT_PATHS['chroots'].lstrip('/'),
|
||||
fail_if_mounted=fail_if_mounted,
|
||||
)
|
||||
|
||||
def write_makepkg_conf(self, target_arch: Arch, cross_chroot_relative: Optional[str], cross: bool = True) -> str:
|
||||
"""
|
||||
Generate a `makepkg.conf` or `makepkg_cross_$arch.conf` file in /etc.
|
||||
@@ -298,11 +281,11 @@ class Chroot(AbstractChroot):
|
||||
root_write_file(makepkg_conf_path, makepkg_cross_conf)
|
||||
return makepkg_conf_path_relative
|
||||
|
||||
def write_pacman_conf(self, check_space: Optional[bool] = None, in_chroot: bool = True, absolute_path: Optional[str] = None):
|
||||
def write_pacman_conf(self, check_space: Optional[bool] = None, in_chroot: bool = True, absolute_path: str = None):
|
||||
user = None
|
||||
group = None
|
||||
if check_space is None:
|
||||
check_space = config.file.pacman.check_space
|
||||
check_space = config.file['pacman']['check_space']
|
||||
if not absolute_path:
|
||||
path = self.get_path('/etc')
|
||||
root_makedir(path)
|
||||
@@ -322,75 +305,47 @@ class Chroot(AbstractChroot):
|
||||
|
||||
def create_user(
|
||||
self,
|
||||
user: str = 'kupfer',
|
||||
password: Optional[str] = None,
|
||||
groups: list[str] = ['network', 'video', 'audio', 'optical', 'storage', 'input', 'scanner', 'games', 'lp', 'rfkill', 'wheel'],
|
||||
primary_group: Optional[str] = 'users',
|
||||
uid: Optional[int] = None,
|
||||
non_unique: bool = False,
|
||||
user='kupfer',
|
||||
password='123456',
|
||||
groups=['network', 'video', 'audio', 'optical', 'storage', 'input', 'scanner', 'games', 'lp', 'rfkill', 'wheel'],
|
||||
):
|
||||
user = user or 'kupfer'
|
||||
uid_param = f'-u {uid}' if uid is not None else ''
|
||||
unique_param = '--non-unique' if non_unique else ''
|
||||
pgroup_param = f'-g {primary_group}' if primary_group else ''
|
||||
install_script = f'''
|
||||
set -e
|
||||
if ! id -u "{user}" >/dev/null 2>&1; then
|
||||
useradd -m {unique_param} {uid_param} {pgroup_param} {user}
|
||||
useradd -m {user}
|
||||
fi
|
||||
usermod -a -G {",".join(groups)} {unique_param} {uid_param} {pgroup_param} {user}
|
||||
chown {user}:{primary_group if primary_group else user} /home/{user} -R
|
||||
usermod -a -G {",".join(groups)} {user}
|
||||
chown {user}:{user} /home/{user} -R
|
||||
'''
|
||||
if password:
|
||||
install_script += f'echo "{user}:{password}" | chpasswd'
|
||||
else:
|
||||
install_script += f'echo "Set user password:" && passwd {user}'
|
||||
result = self.run_cmd(install_script)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to setup user {user} in self.name')
|
||||
|
||||
def get_uid(self, user: Union[str, int]) -> int:
|
||||
if isinstance(user, int):
|
||||
return user
|
||||
if user == 'root':
|
||||
return 0
|
||||
res = self.run_cmd(['id', '-u', user], capture_output=True)
|
||||
assert isinstance(res, subprocess.CompletedProcess)
|
||||
if res.returncode or not res.stdout:
|
||||
raise Exception(f"chroot {self.name}: Couldnt detect uid for user {user}: {repr(res.stdout)}")
|
||||
uid = res.stdout.decode()
|
||||
return int(uid)
|
||||
|
||||
def add_sudo_config(self, config_name: str = 'wheel', privilegee: str = '%wheel', password_required: bool = True):
|
||||
if '.' in config_name:
|
||||
raise Exception(f"won't create sudoers.d file {config_name} since it will be ignored by sudo because it contains a dot!")
|
||||
comment = ('# allow ' + (f'members of group {privilegee.strip("%")}' if privilegee.startswith('%') else f'user {privilegee}') +
|
||||
'to run any program as root' + ('' if password_required else ' without a password'))
|
||||
line = privilegee + (' ALL=(ALL:ALL) ALL' if password_required else ' ALL=(ALL) NOPASSWD: ALL')
|
||||
root_write_file(self.get_path(f'/etc/sudoers.d/{config_name}'), f'{comment}\n{line}')
|
||||
raise Exception('Failed to setup user')
|
||||
|
||||
def try_install_packages(
|
||||
self,
|
||||
packages: list[str],
|
||||
refresh: bool = False,
|
||||
allow_fail: bool = True,
|
||||
redirect_stderr: bool = True,
|
||||
) -> dict[str, Union[int, subprocess.CompletedProcess]]:
|
||||
"""Try installing packages, fall back to installing one by one"""
|
||||
results = {}
|
||||
stderr = sys.stdout if redirect_stderr else sys.stderr
|
||||
if refresh:
|
||||
results['refresh'] = self.run_cmd('pacman -Syy --noconfirm', stderr=stderr)
|
||||
results['refresh'] = self.run_cmd('pacman -Syy --noconfirm')
|
||||
cmd = "pacman -S --noconfirm --needed --overwrite='/*'"
|
||||
result = self.run_cmd(f'{cmd} -y {" ".join(packages)}', stderr=stderr)
|
||||
result = self.run_cmd(f'{cmd} -y {" ".join(packages)}')
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
results |= {package: result for package in packages}
|
||||
if result.returncode != 0 and allow_fail:
|
||||
results = {}
|
||||
logging.debug('Falling back to serial installation')
|
||||
for pkg in set(packages):
|
||||
results[pkg] = self.run_cmd(f'{cmd} {pkg}', stderr=stderr)
|
||||
# Don't check for errors here because there might be packages that are listed as dependencies but are not available on x86_64
|
||||
results[pkg] = self.run_cmd(f'{cmd} {pkg}')
|
||||
return results
|
||||
|
||||
|
||||
@@ -399,29 +354,26 @@ chroots: dict[str, Chroot] = {}
|
||||
|
||||
def get_chroot(
|
||||
name: str,
|
||||
chroot_class: type[Chroot],
|
||||
chroot_args: dict,
|
||||
initialize: bool = False,
|
||||
activate: bool = False,
|
||||
fail_if_exists: bool = False,
|
||||
extra_repos: Optional[Mapping[str, RepoInfo]] = None,
|
||||
default: Chroot = None,
|
||||
) -> Chroot:
|
||||
global chroots
|
||||
if name not in chroots:
|
||||
chroot = chroot_class(name, **chroot_args)
|
||||
logging.debug(f'Adding chroot {name} to chroot map: {chroot.uuid}')
|
||||
chroots[name] = chroot
|
||||
if default and name not in chroots:
|
||||
logging.debug(f'Adding chroot {name} to chroot map: {default.uuid}')
|
||||
chroots[name] = default
|
||||
else:
|
||||
existing = chroots[name]
|
||||
if fail_if_exists:
|
||||
raise Exception(f'chroot {name} already exists: {existing.uuid}')
|
||||
logging.debug(f"returning existing chroot {name}: {existing.uuid}")
|
||||
assert isinstance(existing, chroot_class)
|
||||
chroot = chroots[name]
|
||||
if extra_repos is not None:
|
||||
chroot.extra_repos = dict(extra_repos) # copy to new dict
|
||||
if initialize:
|
||||
chroot.initialize()
|
||||
if activate:
|
||||
chroot.activate()
|
||||
chroot.activate(fail_if_active=False)
|
||||
return chroot
|
||||
|
||||
@@ -1,15 +1,13 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
from glob import glob
|
||||
from shutil import rmtree
|
||||
from typing import ClassVar
|
||||
|
||||
from constants import Arch
|
||||
from exec.cmd import run_root_cmd
|
||||
from exec.file import makedir, root_makedir
|
||||
from config.state import config
|
||||
from config import config
|
||||
|
||||
from .abstract import Chroot, get_chroot
|
||||
from .helpers import base_chroot_name
|
||||
@@ -17,7 +15,7 @@ from .helpers import base_chroot_name
|
||||
|
||||
class BaseChroot(Chroot):
|
||||
|
||||
_copy_base: ClassVar[bool] = False
|
||||
copy_base: bool = False
|
||||
|
||||
def create_rootfs(self, reset, pacman_conf_target, active_previously):
|
||||
if reset:
|
||||
@@ -32,20 +30,17 @@ class BaseChroot(Chroot):
|
||||
|
||||
logging.info(f'Pacstrapping chroot {self.name}: {", ".join(self.base_packages)}')
|
||||
|
||||
result = run_root_cmd(
|
||||
[
|
||||
'pacstrap',
|
||||
'-C',
|
||||
pacman_conf_target,
|
||||
'-G',
|
||||
self.path,
|
||||
*self.base_packages,
|
||||
'--needed',
|
||||
'--overwrite=*',
|
||||
'-yyuu',
|
||||
],
|
||||
stderr=sys.stdout,
|
||||
)
|
||||
result = run_root_cmd([
|
||||
'pacstrap',
|
||||
'-C',
|
||||
pacman_conf_target,
|
||||
'-G',
|
||||
self.path,
|
||||
] + self.base_packages + [
|
||||
'--needed',
|
||||
'--overwrite=*',
|
||||
'-yyuu',
|
||||
])
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to initialize chroot "{self.name}"')
|
||||
self.initialized = True
|
||||
@@ -53,7 +48,7 @@ class BaseChroot(Chroot):
|
||||
|
||||
def get_base_chroot(arch: Arch) -> BaseChroot:
|
||||
name = base_chroot_name(arch)
|
||||
args = dict(arch=arch, copy_base=False)
|
||||
chroot = get_chroot(name, initialize=False, chroot_class=BaseChroot, chroot_args=args)
|
||||
default = BaseChroot(name, arch, copy_base=False, initialize=False)
|
||||
chroot = get_chroot(name, initialize=False, default=default)
|
||||
assert isinstance(chroot, BaseChroot)
|
||||
return chroot
|
||||
|
||||
@@ -2,9 +2,9 @@ import logging
|
||||
import os
|
||||
import subprocess
|
||||
from glob import glob
|
||||
from typing import ClassVar, Optional
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import Arch, GCC_HOSTSPECS, CROSSDIRECT_PKGS, CHROOT_PATHS
|
||||
from distro.distro import get_kupfer_local
|
||||
from exec.cmd import run_root_cmd
|
||||
@@ -17,7 +17,7 @@ from .base import get_base_chroot
|
||||
|
||||
class BuildChroot(Chroot):
|
||||
|
||||
_copy_base: ClassVar[bool] = True
|
||||
copy_base: bool = True
|
||||
|
||||
def create_rootfs(self, reset: bool, pacman_conf_target: str, active_previously: bool):
|
||||
makedir(config.get_path('chroots'))
|
||||
@@ -69,8 +69,7 @@ class BuildChroot(Chroot):
|
||||
"""
|
||||
target_arch = self.arch
|
||||
if not native_chroot:
|
||||
assert config.runtime.arch
|
||||
native_chroot = get_build_chroot(config.runtime.arch)
|
||||
native_chroot = get_build_chroot(config.runtime['arch'])
|
||||
host_arch = native_chroot.arch
|
||||
hostspec = GCC_HOSTSPECS[host_arch][target_arch]
|
||||
cc = f'{hostspec}-cc'
|
||||
@@ -82,7 +81,6 @@ class BuildChroot(Chroot):
|
||||
native_chroot.mount_pacman_cache()
|
||||
native_chroot.mount_packages()
|
||||
native_chroot.activate()
|
||||
logging.debug(f"Installing {CROSSDIRECT_PKGS=} + {gcc=}")
|
||||
results = dict(native_chroot.try_install_packages(
|
||||
CROSSDIRECT_PKGS + [gcc],
|
||||
refresh=True,
|
||||
@@ -104,8 +102,8 @@ class BuildChroot(Chroot):
|
||||
target_include_dir = os.path.join(self.path, 'include')
|
||||
|
||||
for target, source in {cc_path: gcc, target_lib_dir: 'lib', target_include_dir: 'usr/include'}.items():
|
||||
if not (os.path.exists(target) or os.path.islink(target)):
|
||||
logging.debug(f'Symlinking {source=} at {target=}')
|
||||
if not os.path.exists(target):
|
||||
logging.debug(f'Symlinking {source} at {target}')
|
||||
symlink(source, target)
|
||||
ld_so = os.path.basename(glob(f"{os.path.join(native_chroot.path, 'usr', 'lib', 'ld-linux-')}*")[0])
|
||||
ld_so_target = os.path.join(target_lib_dir, ld_so)
|
||||
@@ -133,39 +131,13 @@ class BuildChroot(Chroot):
|
||||
fail_if_mounted=fail_if_mounted,
|
||||
)
|
||||
|
||||
def mount_ccache(self, user: str = 'kupfer', fail_if_mounted: bool = False):
|
||||
mount_source = os.path.join(config.get_path('ccache'), self.arch)
|
||||
mount_dest = os.path.join(f'/home/{user}' if user != 'root' else '/root', '.ccache')
|
||||
uid = self.get_uid(user)
|
||||
makedir(mount_source, user=uid)
|
||||
return self.mount(
|
||||
absolute_source=mount_source,
|
||||
relative_destination=mount_dest,
|
||||
fail_if_mounted=fail_if_mounted,
|
||||
)
|
||||
|
||||
def mount_rust(self, user: str = 'kupfer', fail_if_mounted: bool = False) -> list[str]:
|
||||
results = []
|
||||
uid = self.get_uid(user)
|
||||
mount_source_base = config.get_path('rust') # apparently arch-agnostic
|
||||
for rust_dir in ['cargo', 'rustup']:
|
||||
mount_source = os.path.join(mount_source_base, rust_dir)
|
||||
mount_dest = os.path.join(f'/home/{user}' if user != 'root' else '/root', f'.{rust_dir}')
|
||||
makedir(mount_source, user=uid)
|
||||
results.append(self.mount(
|
||||
absolute_source=mount_source,
|
||||
relative_destination=mount_dest,
|
||||
fail_if_mounted=fail_if_mounted,
|
||||
))
|
||||
return results
|
||||
|
||||
|
||||
def get_build_chroot(arch: Arch, add_kupfer_repos: bool = True, **kwargs) -> BuildChroot:
|
||||
name = build_chroot_name(arch)
|
||||
if 'extra_repos' in kwargs:
|
||||
raise Exception('extra_repos!')
|
||||
repos = get_kupfer_local(arch).repos if add_kupfer_repos else {}
|
||||
args = dict(arch=arch)
|
||||
chroot = get_chroot(name, **kwargs, extra_repos=repos, chroot_class=BuildChroot, chroot_args=args)
|
||||
default = BuildChroot(name, arch, initialize=False, copy_base=True, extra_repos=repos)
|
||||
chroot = get_chroot(name, **kwargs, extra_repos=repos, default=default)
|
||||
assert isinstance(chroot, BuildChroot)
|
||||
return chroot
|
||||
|
||||
@@ -1,67 +0,0 @@
|
||||
import click
|
||||
import logging
|
||||
import os
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from wrapper import enforce_wrap
|
||||
from devices.device import get_profile_device
|
||||
|
||||
from .abstract import Chroot
|
||||
from .base import get_base_chroot
|
||||
from .build import get_build_chroot, BuildChroot
|
||||
|
||||
CHROOT_TYPES = ['base', 'build', 'rootfs']
|
||||
|
||||
|
||||
@click.command('chroot')
|
||||
@click.argument('type', required=False, type=click.Choice(CHROOT_TYPES), default='build')
|
||||
@click.argument(
|
||||
'name',
|
||||
required=False,
|
||||
default=None,
|
||||
)
|
||||
@click.pass_context
|
||||
def cmd_chroot(ctx: click.Context, type: str = 'build', name: Optional[str] = None, enable_crossdirect=True):
|
||||
"""Open a shell in a chroot. For rootfs NAME is a profile name, for others the architecture (e.g. aarch64)."""
|
||||
|
||||
if type not in CHROOT_TYPES:
|
||||
raise Exception(f'Unknown chroot type: "{type}"')
|
||||
|
||||
if type == 'rootfs':
|
||||
from image.image import cmd_inspect
|
||||
assert isinstance(cmd_inspect, click.Command)
|
||||
ctx.invoke(cmd_inspect, profile=name, shell=True)
|
||||
return
|
||||
|
||||
enforce_wrap()
|
||||
|
||||
chroot: Chroot
|
||||
arch = name
|
||||
if not arch:
|
||||
arch = get_profile_device().arch
|
||||
assert arch
|
||||
if type == 'base':
|
||||
chroot = get_base_chroot(arch)
|
||||
if not os.path.exists(chroot.get_path('/bin')):
|
||||
chroot.initialize()
|
||||
chroot.initialized = True
|
||||
elif type == 'build':
|
||||
build_chroot: BuildChroot = get_build_chroot(arch, activate=True)
|
||||
chroot = build_chroot # type safety
|
||||
if not os.path.exists(build_chroot.get_path('/bin')):
|
||||
build_chroot.initialize()
|
||||
build_chroot.initialized = True
|
||||
build_chroot.mount_pkgbuilds()
|
||||
build_chroot.mount_chroots()
|
||||
assert arch and config.runtime.arch
|
||||
if config.file.build.crossdirect and enable_crossdirect and arch != config.runtime.arch:
|
||||
build_chroot.mount_crossdirect()
|
||||
else:
|
||||
raise Exception('Really weird bug')
|
||||
|
||||
chroot.mount_packages()
|
||||
chroot.activate()
|
||||
logging.debug(f'Starting shell in {chroot.name}:')
|
||||
chroot.run_cmd('bash', attach_tty=True)
|
||||
@@ -1,11 +1,10 @@
|
||||
import atexit
|
||||
import os
|
||||
|
||||
from typing import ClassVar, Optional
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import Arch, BASE_PACKAGES
|
||||
from distro.repo import RepoInfo
|
||||
from distro.distro import get_kupfer_local, get_kupfer_https
|
||||
from exec.file import get_temp_dir, makedir, root_makedir
|
||||
from utils import check_findmnt
|
||||
@@ -17,7 +16,7 @@ from .abstract import get_chroot
|
||||
|
||||
class DeviceChroot(BuildChroot):
|
||||
|
||||
_copy_base: ClassVar[bool] = False
|
||||
copy_base: bool = False
|
||||
|
||||
def create_rootfs(self, reset, pacman_conf_target, active_previously):
|
||||
clss = BuildChroot if self.copy_base else BaseChroot
|
||||
@@ -30,7 +29,7 @@ class DeviceChroot(BuildChroot):
|
||||
|
||||
clss.create_rootfs(self, reset, pacman_conf_target, active_previously)
|
||||
|
||||
def mount_rootfs(self, source_path: str, fs_type: Optional[str] = None, options: list[str] = [], allow_overlay: bool = False):
|
||||
def mount_rootfs(self, source_path: str, fs_type: str = None, options: list[str] = [], allow_overlay: bool = False):
|
||||
if self.active:
|
||||
raise Exception(f'{self.name}: Chroot is marked as active, not mounting a rootfs over it.')
|
||||
if not os.path.exists(source_path):
|
||||
@@ -57,15 +56,14 @@ def get_device_chroot(
|
||||
arch: Arch,
|
||||
packages: list[str] = BASE_PACKAGES,
|
||||
use_local_repos: bool = True,
|
||||
extra_repos: Optional[dict[str, RepoInfo]] = None,
|
||||
extra_repos: Optional[dict] = None,
|
||||
**kwargs,
|
||||
) -> DeviceChroot:
|
||||
name = f'rootfs_{device}-{flavour}'
|
||||
repos: dict[str, RepoInfo] = get_kupfer_local(arch).repos if use_local_repos else get_kupfer_https(arch).repos # type: ignore
|
||||
|
||||
repos = dict(get_kupfer_local(arch).repos if use_local_repos else get_kupfer_https(arch).repos)
|
||||
repos.update(extra_repos or {})
|
||||
|
||||
args = dict(arch=arch, base_packages=packages, extra_repos=repos)
|
||||
chroot = get_chroot(name, **kwargs, extra_repos=repos, chroot_class=DeviceChroot, chroot_args=args)
|
||||
default = DeviceChroot(name, arch, initialize=False, copy_base=False, base_packages=packages, extra_repos=repos)
|
||||
chroot = get_chroot(name, **kwargs, extra_repos=repos, default=default)
|
||||
assert isinstance(chroot, DeviceChroot)
|
||||
return chroot
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import os
|
||||
from typing import Optional, TypedDict
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import Arch
|
||||
|
||||
BIND_BUILD_DIRS = 'BINDBUILDDIRS'
|
||||
@@ -61,7 +61,7 @@ def make_abs_path(path: str) -> str:
|
||||
return '/' + path.lstrip('/')
|
||||
|
||||
|
||||
def get_chroot_path(chroot_name, override_basepath: Optional[str] = None) -> str:
|
||||
def get_chroot_path(chroot_name, override_basepath: str = None) -> str:
|
||||
base_path = config.get_path('chroots') if not override_basepath else override_basepath
|
||||
return os.path.join(base_path, chroot_name)
|
||||
|
||||
|
||||
@@ -0,0 +1,265 @@
|
||||
import click
|
||||
import logging
|
||||
from copy import deepcopy
|
||||
from typing import Any, Optional, Union
|
||||
|
||||
from .scheme import Profile
|
||||
from .profile import PROFILE_EMPTY, PROFILE_DEFAULTS
|
||||
from .state import ConfigStateHolder, CONFIG_DEFAULTS, CONFIG_SECTIONS, merge_configs
|
||||
|
||||
|
||||
def list_to_comma_str(str_list: list[str], default='') -> str:
|
||||
if str_list is None:
|
||||
return default
|
||||
return ','.join(str_list)
|
||||
|
||||
|
||||
def comma_str_to_list(s: str, default=None) -> list[str]:
|
||||
if not s:
|
||||
return default
|
||||
return [a for a in s.split(',') if a]
|
||||
|
||||
|
||||
def prompt_config(
|
||||
text: str,
|
||||
default: Any,
|
||||
field_type: type = str,
|
||||
bold: bool = True,
|
||||
echo_changes: bool = True,
|
||||
) -> tuple[Any, bool]:
|
||||
"""
|
||||
prompts for a new value for a config key. returns the result and a boolean that indicates
|
||||
whether the result is different, considering empty strings and None equal to each other.
|
||||
"""
|
||||
|
||||
original_default = default
|
||||
|
||||
def true_or_zero(to_check) -> bool:
|
||||
"""returns true if the value is truthy or int(0)"""
|
||||
zero = 0 # compiler complains about 'is with literal' otherwise
|
||||
return to_check or to_check is zero # can't do == due to boolean<->int casting
|
||||
|
||||
if type(None) == field_type:
|
||||
field_type = str
|
||||
|
||||
if field_type == dict:
|
||||
raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap')
|
||||
elif field_type == list:
|
||||
default = list_to_comma_str(default)
|
||||
value_conv = comma_str_to_list
|
||||
else:
|
||||
value_conv = None
|
||||
default = '' if default is None else default
|
||||
|
||||
if bold:
|
||||
text = click.style(text, bold=True)
|
||||
|
||||
result = click.prompt(text, type=field_type, default=default, value_proc=value_conv, show_default=True) # type: ignore
|
||||
changed = result != (original_default if field_type == list else default) and (true_or_zero(default) or true_or_zero(result))
|
||||
if changed and echo_changes:
|
||||
print(f'value changed: "{text}" = "{result}"')
|
||||
return result, changed
|
||||
|
||||
|
||||
def prompt_profile(name: str, create: bool = True, defaults: Union[Profile, dict] = {}) -> tuple[Profile, bool]:
|
||||
"""Prompts the user for every field in `defaults`. Set values to None for an empty profile."""
|
||||
|
||||
profile: Any = PROFILE_EMPTY | defaults
|
||||
# don't use get_profile() here because we need the sparse profile
|
||||
if name in config.file['profiles']:
|
||||
profile |= config.file['profiles'][name]
|
||||
elif create:
|
||||
logging.info(f"Profile {name} doesn't exist yet, creating new profile.")
|
||||
else:
|
||||
raise Exception(f'Unknown profile "{name}"')
|
||||
logging.info(f'Configuring profile "{name}"')
|
||||
changed = False
|
||||
for key, current in profile.items():
|
||||
current = profile[key]
|
||||
text = f'{name}.{key}'
|
||||
result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key])) # type: ignore
|
||||
if _changed:
|
||||
profile[key] = result
|
||||
changed = True
|
||||
return profile, changed
|
||||
|
||||
|
||||
def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any:
|
||||
if not isinstance(config, dict):
|
||||
raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}")
|
||||
split_name = name.split('.')
|
||||
name = split_name[0]
|
||||
if name not in config:
|
||||
raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
|
||||
value = config[name]
|
||||
if len(split_name) == 1:
|
||||
return value
|
||||
else:
|
||||
rest_name = '.'.join(split_name[1:])
|
||||
return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')
|
||||
|
||||
|
||||
def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
|
||||
split_name = name.split('.')
|
||||
if len(split_name) > 1:
|
||||
config = config_dot_name_get('.'.join(split_name[:-1]), config)
|
||||
config[split_name[-1]] = value
|
||||
|
||||
|
||||
def prompt_for_save(retry_ctx: Optional[click.Context] = None):
|
||||
"""
|
||||
Prompt whether to save the config file. If no is answered, `False` is returned.
|
||||
|
||||
If `retry_ctx` is passed, the context's command will be reexecuted with the same arguments if the user chooses to retry.
|
||||
False will still be returned as the retry is expected to either save, perform another retry or arbort.
|
||||
"""
|
||||
if click.confirm(f'Do you want to save your changes to {config.runtime["config_file"]}?', default=True):
|
||||
return True
|
||||
if retry_ctx:
|
||||
if click.confirm('Retry? ("n" to quit without saving)', default=True):
|
||||
retry_ctx.forward(retry_ctx.command)
|
||||
return False
|
||||
|
||||
|
||||
config: ConfigStateHolder = ConfigStateHolder(file_conf_base=CONFIG_DEFAULTS)
|
||||

config_option = click.option(
    '-C',
    '--config',
    'config_file',
    help='Override path to config file',
)


@click.group(name='config')
def cmd_config():
    """Manage the configuration and -profiles"""


noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)


@cmd_config.command(name='init')
@noninteractive_flag
@noop_flag
@click.option(
    '--sections',
    '-s',
    multiple=True,
    type=click.Choice(CONFIG_SECTIONS),
    default=CONFIG_SECTIONS,
    show_choices=True,
)
@click.pass_context
def cmd_config_init(ctx, sections: list[str] = CONFIG_SECTIONS, non_interactive: bool = False, noop: bool = False):
    """Initialize the config file"""
    if not non_interactive:
        results: dict[str, dict] = {}
        for section in sections:
            if section not in CONFIG_SECTIONS:
                raise Exception(f'Unknown section: {section}')
            if section == 'profiles':
                continue

            results[section] = {}
            for key, current in config.file[section].items():
                text = f'{section}.{key}'
                result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key]))
                if changed:
                    results[section][key] = result

        config.update(results)
        if 'profiles' in sections:
            current_profile = 'default' if 'current' not in config.file['profiles'] else config.file['profiles']['current']
            new_current, _ = prompt_config('profile.current', default=current_profile, field_type=str)
            profile, changed = prompt_profile(new_current, create=True)
            config.update_profile(new_current, profile)
        if not noop:
            if not prompt_for_save(ctx):
                return

    if not noop:
        config.write()
    else:
        logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')
@cmd_config.command(name='set')
@noninteractive_flag
@noop_flag
@click.argument('key_vals', nargs=-1)
@click.pass_context
def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop: bool = False):
    """
    Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers,
    like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively.
    """
    config.enforce_config_loaded()
    config_copy = deepcopy(config.file)
    for pair in key_vals:
        split_pair = pair.split('=')
        if len(split_pair) == 2:
            key: str = split_pair[0]
            value: Any = split_pair[1]
            value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
            if value_type != list:
                value = click.types.convert_type(value_type)(value)
            else:
                value = comma_str_to_list(value, default=[])
        elif len(split_pair) == 1 and not non_interactive:
            key = split_pair[0]
            value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
            current = config_dot_name_get(key, config.file)
            value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False)
        else:
            raise Exception(f'Invalid key=value pair "{pair}"')
        print('%s = %s' % (key, value))
        config_dot_name_set(key, value, config_copy)
        if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy:
            raise Exception(f'Config "{key}" = "{value}" failed to evaluate')
    if not noop:
        if not non_interactive and not prompt_for_save(ctx):
            return
        config.update(config_copy)
        config.write()
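
As a hedged illustration of how this subcommand behaves (normally invoked as `kupferbootstrap config set ...` on the command line), here is a sketch using click's test runner; the key and value are arbitrary examples and a loaded config file is assumed:

    from click.testing import CliRunner

    # -n/--noop skips writing the file, -N skips the save prompt.
    # Assumes a config file has already been loaded (config.enforce_config_loaded() runs first).
    runner = CliRunner()
    result = runner.invoke(cmd_config_set, ['--noop', '-N', 'build.clean_mode=false'])
    print(result.output)  # echoes 'build.clean_mode = False' once the value is parsed
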

@cmd_config.command(name='get')
@click.argument('keys', nargs=-1)
def cmd_config_get(keys: list[str]):
    """Get config entries.
    Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`"""
    if len(keys) == 1:
        print(config_dot_name_get(keys[0], config.file))
        return
    for key in keys:
        print('%s = %s' % (key, config_dot_name_get(key, config.file)))


@cmd_config.group(name='profile')
def cmd_profile():
    """Manage config profiles"""


@cmd_profile.command(name='init')
@noninteractive_flag
@noop_flag
@click.argument('name', required=True)
@click.pass_context
def cmd_profile_init(ctx, name: str, non_interactive: bool = False, noop: bool = False):
    """Create or edit a profile"""
    profile = deepcopy(PROFILE_EMPTY)
    if name in config.file['profiles']:
        profile |= config.file['profiles'][name]

    if not non_interactive:
        profile, _changed = prompt_profile(name, create=True)

    config.update_profile(name, profile)
    if not noop:
        if not prompt_for_save(ctx):
            return
        config.write()
    else:
        logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')
387
config/cli.py
387
config/cli.py
@@ -1,387 +0,0 @@
|
||||
import click
import logging

from copy import deepcopy
from typing import Any, Callable, Iterable, Mapping, Optional, Union

from devices.device import get_devices, sanitize_device_name
from flavours.flavour import get_flavours
from utils import color_bold, colors_supported, color_mark_selected
from wrapper import execute_without_exit

from .scheme import Profile
from .profile import PROFILE_EMPTY, PROFILE_DEFAULTS, resolve_profile_attr, SparseProfile
from .state import config, CONFIG_DEFAULTS, CONFIG_SECTIONS, merge_configs


def list_to_comma_str(str_list: list[str], default='') -> str:
    if str_list is None:
        return default
    return ','.join(str_list)


def comma_str_to_list(s: str, default=None) -> list[str]:
    if not s:
        return default
    return [a for a in s.split(',') if a]
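
These two helpers round-trip between comma-separated strings and lists; a quick sketch of their behaviour:

    assert list_to_comma_str(['a', 'b']) == 'a,b'
    assert list_to_comma_str(None) == ''              # falls back to the default
    assert comma_str_to_list('a,,b') == ['a', 'b']    # empty segments are dropped
    assert comma_str_to_list('', default=[]) == []
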

def prompt_config(
    text: str,
    default: Any,
    field_type: Union[type, click.Choice] = str,
    bold: bool = True,
    echo_changes: bool = True,
    show_choices: bool = False,
) -> tuple[Any, bool]:
    """
    prompts for a new value for a config key. returns the result and a boolean that indicates
    whether the result is different, considering empty strings and None equal to each other.
    """

    original_default = default

    def true_or_zero(to_check) -> bool:
        """returns true if the value is truthy or int(0)"""
        zero = 0  # compiler complains about 'is with literal' otherwise
        return to_check or to_check is zero  # can't do == due to boolean<->int casting

    if type(None) == field_type:
        field_type = str

    if field_type == dict:
        raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap')
    elif field_type == list:
        default = list_to_comma_str(default)
        value_conv = comma_str_to_list
    else:
        value_conv = None
        default = '' if default is None else default

    if bold:
        text = click.style(text, bold=True)

    result = click.prompt(
        text,
        type=field_type,  # type: ignore
        default=default,
        value_proc=value_conv,
        show_default=True,
        show_choices=show_choices,
    )  # type: ignore
    changed = result != (original_default if field_type == list else default) and (true_or_zero(default) or true_or_zero(result))
    if changed and echo_changes:
        print(f'value changed: "{text}" = "{result}"')
    return result, changed
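
A short, hedged sketch of how callers consume the (value, changed) pair returned above (the key name and default are only examples):

    value, changed = prompt_config(text='pacman.parallel_downloads', default=5, field_type=int)
    if changed:
        print(f'new value: {value}')
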
|
||||
|
||||
def prompt_profile(
|
||||
name: str,
|
||||
create: bool = True,
|
||||
defaults: Union[Profile, dict] = {},
|
||||
no_parse: bool = True,
|
||||
) -> tuple[Profile, bool]:
|
||||
"""Prompts the user for every field in `defaults`. Set values to None for an empty profile."""
|
||||
PARSEABLE_FIELDS = ['device', 'flavour']
|
||||
profile: Any = PROFILE_EMPTY | defaults
|
||||
if name == 'current':
|
||||
raise Exception("profile name 'current' not allowed")
|
||||
# don't use get_profile() here because we need the sparse profile
|
||||
if name in config.file.profiles:
|
||||
logging.debug(f"Merging with existing profile config for {name}")
|
||||
profile |= config.file.profiles[name]
|
||||
elif create:
|
||||
logging.info(f"Profile {name} doesn't exist yet, creating new profile.")
|
||||
else:
|
||||
raise Exception(f'Unknown profile "{name}"')
|
||||
logging.info(f'Configuring profile "{name}"')
|
||||
changed = False
|
||||
for key, current in profile.items():
|
||||
current = profile[key]
|
||||
text = f'profiles.{name}.{key}'
|
||||
if not no_parse and key in PARSEABLE_FIELDS:
|
||||
parse_prompt = None
|
||||
sanitize_func = None
|
||||
if key == 'device':
|
||||
parse_prompt = prompt_profile_device
|
||||
sanitize_func = sanitize_device_name
|
||||
elif key == 'flavour':
|
||||
parse_prompt = prompt_profile_flavour
|
||||
else:
|
||||
raise Exception(f'config: Unhandled parseable field {key}, this is a bug in kupferbootstrap.')
|
||||
result, _changed = parse_prompt(
|
||||
current=current,
|
||||
profile_name=name,
|
||||
sparse_profiles=config.file.profiles,
|
||||
use_colors=config.runtime.colors,
|
||||
sanitize_func=sanitize_func,
|
||||
) # type: ignore
|
||||
else:
|
||||
result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key])) # type: ignore
|
||||
if _changed:
|
||||
profile[key] = result
|
||||
changed = True
|
||||
return profile, changed
|
||||
|
||||
|
||||
def prompt_choice(current: Optional[Any], key: str, choices: Iterable[Any], allow_none: bool = True, show_choices: bool = False) -> tuple[Any, bool]:
|
||||
choices = list(choices) + ([''] if allow_none else [])
|
||||
res, _ = prompt_config(text=key, default=current, field_type=click.Choice(choices), show_choices=show_choices)
|
||||
if allow_none and res == '':
|
||||
res = None
|
||||
return res, res != current
|
||||
|
||||
|
||||
def resolve_profile_field(current: Any, *kargs):
|
||||
try:
|
||||
return resolve_profile_attr(*kargs)
|
||||
except KeyError as err:
|
||||
logging.debug(err)
|
||||
return current, None
|
||||
|
||||
|
||||
def prompt_wrappable(
|
||||
attr_name: str,
|
||||
native_cmd: Callable,
|
||||
cli_cmd: list[str],
|
||||
current: Optional[str],
|
||||
profile_name: str,
|
||||
sparse_profiles: Mapping[str, SparseProfile],
|
||||
sanitize_func: Optional[Callable[[str], str]] = None,
|
||||
use_colors: Optional[bool] = None,
|
||||
) -> tuple[str, bool]:
|
||||
use_colors = colors_supported(use_colors)
|
||||
|
||||
print(color_bold(f"Pick your {attr_name}!\nThese are the available choices:", use_colors=use_colors))
|
||||
items = execute_without_exit(native_cmd, cli_cmd)
|
||||
if items is None:
|
||||
logging.warning("(wrapper mode, input for this field will not be checked for correctness)")
|
||||
return prompt_config(text=f'profiles.{profile_name}.{attr_name}', default=current)
|
||||
selected, inherited_from = resolve_profile_field(current, profile_name, attr_name, sparse_profiles)
|
||||
if selected and sanitize_func:
|
||||
selected = sanitize_func(selected)
|
||||
for key in sorted(items.keys()):
|
||||
text = items[key].nice_str(newlines=True, colors=use_colors)
|
||||
if key == selected:
|
||||
text = color_mark_selected(text, profile_name, inherited_from)
|
||||
print(text + '\n')
|
||||
return prompt_choice(current, f'profiles.{profile_name}.{attr_name}', items.keys())
|
||||
|
||||
|
||||
def prompt_profile_device(*kargs, **kwargs) -> tuple[str, bool]:
|
||||
return prompt_wrappable('device', get_devices, ['devices'], *kargs, **kwargs)
|
||||
|
||||
|
||||
def prompt_profile_flavour(*kargs, **kwargs) -> tuple[str, bool]:
|
||||
return prompt_wrappable('flavour', get_flavours, ['flavours'], *kargs, **kwargs)
|
||||
|
||||
|
||||
def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any:
|
||||
if not isinstance(config, dict):
|
||||
raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}")
|
||||
split_name = name.split('.')
|
||||
name = split_name[0]
|
||||
if name not in config:
|
||||
raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
|
||||
value = config[name]
|
||||
if len(split_name) == 1:
|
||||
return value
|
||||
else:
|
||||
rest_name = '.'.join(split_name[1:])
|
||||
return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')
|
||||
|
||||
|
||||
def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
|
||||
split_name = name.split('.')
|
||||
if len(split_name) > 1:
|
||||
config = config_dot_name_get('.'.join(split_name[:-1]), config)
|
||||
config[split_name[-1]] = value
|
||||
|
||||
|
||||
def prompt_for_save(retry_ctx: Optional[click.Context] = None):
|
||||
"""
|
||||
Prompt whether to save the config file. If no is answered, `False` is returned.
|
||||
|
||||
If `retry_ctx` is passed, the context's command will be re-executed with the same arguments if the user chooses to retry.
False will still be returned, as the retry is expected to either save, perform another retry or abort.
|
||||
"""
|
||||
from wrapper import is_wrapped
|
||||
if click.confirm(f'Do you want to save your changes to {config.runtime.config_file}?', default=True):
|
||||
if is_wrapped():
|
||||
logging.warning("Writing to config file inside wrapper."
|
||||
"This is pointless and probably a bug."
|
||||
"Your host config file will not be modified.")
|
||||
return True
|
||||
if retry_ctx:
|
||||
if click.confirm('Retry? ("n" to quit without saving)', default=True):
|
||||
retry_ctx.forward(retry_ctx.command)
|
||||
return False
|
||||
|
||||
|
||||
config_option = click.option(
|
||||
'-C',
|
||||
'--config',
|
||||
'config_file',
|
||||
help='Override path to config file',
|
||||
)
|
||||
|
||||
|
||||
@click.group(name='config')
|
||||
def cmd_config():
|
||||
"""Manage the configuration and -profiles"""
|
||||
|
||||
|
||||
noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
|
||||
noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)
|
||||
noparse_flag = click.option('--no-parse', help="Don't search PKGBUILDs for devices and flavours", is_flag=True)
|
||||
|
||||
CONFIG_MSG = ("Leave fields empty to leave them at their currently displayed value.")
|
||||
|
||||
|
||||
@cmd_config.command(name='init')
|
||||
@noninteractive_flag
|
||||
@noop_flag
|
||||
@noparse_flag
|
||||
@click.option(
|
||||
'--sections',
|
||||
'-s',
|
||||
multiple=True,
|
||||
type=click.Choice(CONFIG_SECTIONS),
|
||||
default=CONFIG_SECTIONS,
|
||||
show_choices=True,
|
||||
)
|
||||
@click.pass_context
|
||||
def cmd_config_init(
|
||||
ctx,
|
||||
sections: list[str] = CONFIG_SECTIONS,
|
||||
non_interactive: bool = False,
|
||||
noop: bool = False,
|
||||
no_parse: bool = False,
|
||||
):
|
||||
"""Initialize the config file"""
|
||||
if not non_interactive:
|
||||
logging.info(CONFIG_MSG)
|
||||
results: dict[str, dict] = {}
|
||||
for section in sections:
|
||||
if section not in CONFIG_SECTIONS:
|
||||
raise Exception(f'Unknown section: {section}')
|
||||
if section == 'profiles':
|
||||
continue
|
||||
|
||||
results[section] = {}
|
||||
for key, current in config.file[section].items():
|
||||
text = f'{section}.{key}'
|
||||
result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key]))
|
||||
if changed:
|
||||
results[section][key] = result
|
||||
|
||||
config.update(results)
|
||||
print("Main configuration complete")
|
||||
if not noop:
|
||||
if prompt_for_save(ctx):
|
||||
config.write()
|
||||
else:
|
||||
return
|
||||
if 'profiles' in sections:
|
||||
print("Configuring profiles")
|
||||
current_profile = 'default' if 'current' not in config.file.profiles else config.file.profiles.current
|
||||
new_current, _ = prompt_config('profiles.current', default=current_profile, field_type=str)
|
||||
profile, changed = prompt_profile(new_current, create=True, no_parse=no_parse)
|
||||
config.update_profile(new_current, profile)
|
||||
if not noop:
|
||||
if not prompt_for_save(ctx):
|
||||
return
|
||||
|
||||
if not noop:
|
||||
config.write()
|
||||
else:
|
||||
logging.info(f'--noop passed, not writing to {config.runtime.config_file}!')
|
||||
|
||||
|
||||
@cmd_config.command(name='set')
|
||||
@noninteractive_flag
|
||||
@noop_flag
|
||||
@noparse_flag
|
||||
@click.argument('key_vals', nargs=-1)
|
||||
@click.pass_context
|
||||
def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop: bool = False, no_parse: bool = False):
|
||||
"""
|
||||
Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers,
|
||||
like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively.
|
||||
"""
|
||||
config.enforce_config_loaded()
|
||||
logging.info(CONFIG_MSG)
|
||||
config_copy = deepcopy(config.file)
|
||||
for pair in key_vals:
|
||||
split_pair = pair.split('=')
|
||||
if len(split_pair) == 2:
|
||||
key: str = split_pair[0]
|
||||
value: Any = split_pair[1]
|
||||
value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
|
||||
if value_type != list:
|
||||
value = click.types.convert_type(value_type)(value)
|
||||
else:
|
||||
value = comma_str_to_list(value, default=[])
|
||||
elif len(split_pair) == 1 and not non_interactive:
|
||||
key = split_pair[0]
|
||||
value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
|
||||
current = config_dot_name_get(key, config.file)
|
||||
value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False)
|
||||
else:
|
||||
raise Exception(f'Invalid key=value pair "{pair}"')
|
||||
print('%s = %s' % (key, value))
|
||||
config_dot_name_set(key, value, config_copy)
|
||||
if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy:
|
||||
raise Exception(f'Config "{key}" = "{value}" failed to evaluate')
|
||||
if not noop:
|
||||
if not non_interactive and not prompt_for_save(ctx):
|
||||
return
|
||||
config.update(config_copy)
|
||||
config.write()
|
||||
|
||||
|
||||
@cmd_config.command(name='get')
|
||||
@click.argument('keys', nargs=-1)
|
||||
def cmd_config_get(keys: list[str]):
|
||||
"""Get config entries.
|
||||
Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`"""
|
||||
if len(keys) == 1:
|
||||
print(config_dot_name_get(keys[0], config.file))
|
||||
return
|
||||
for key in keys:
|
||||
print('%s = %s' % (key, config_dot_name_get(key, config.file)))
|
||||
|
||||
|
||||
@cmd_config.group(name='profile')
|
||||
def cmd_profile():
|
||||
"""Manage config profiles"""
|
||||
|
||||
|
||||
@cmd_profile.command(name='init')
|
||||
@noninteractive_flag
|
||||
@noop_flag
|
||||
@noparse_flag
|
||||
@click.argument('name', required=False)
|
||||
@click.pass_context
|
||||
def cmd_profile_init(ctx, name: Optional[str] = None, non_interactive: bool = False, noop: bool = False, no_parse: bool = False):
|
||||
"""Create or edit a profile"""
|
||||
profile = deepcopy(PROFILE_EMPTY)
|
||||
if name == 'current':
|
||||
raise Exception("profile name 'current' not allowed")
|
||||
logging.info(CONFIG_MSG)
|
||||
name = name or config.file.profiles.current
|
||||
if name in config.file.profiles:
|
||||
profile |= config.file.profiles[name]
|
||||
|
||||
if not non_interactive:
|
||||
profile, _changed = prompt_profile(name, create=True, no_parse=no_parse)
|
||||
|
||||
config.update_profile(name, profile)
|
||||
if not noop:
|
||||
if not prompt_for_save(ctx):
|
||||
logging.info("Not saving.")
|
||||
return
|
||||
|
||||
config.write()
|
||||
else:
|
||||
logging.info(f'--noop passed, not writing to {config.runtime.config_file}!')
|
||||
@@ -1,7 +1,6 @@
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
from typing import Optional
|
||||
|
||||
from .scheme import Profile, SparseProfile
|
||||
|
||||
@@ -21,14 +20,10 @@ PROFILE_DEFAULTS = Profile.fromDict(PROFILE_DEFAULTS_DICT)
|
||||
PROFILE_EMPTY: Profile = {key: None for key in PROFILE_DEFAULTS.keys()} # type: ignore
|
||||
|
||||
|
||||
class ProfileNotFoundException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def resolve_profile(
|
||||
name: str,
|
||||
sparse_profiles: dict[str, SparseProfile],
|
||||
resolved: Optional[dict[str, Profile]] = None,
|
||||
resolved: dict[str, Profile] = None,
|
||||
_visited=None,
|
||||
) -> dict[str, Profile]:
|
||||
"""
|
||||
@@ -89,40 +84,3 @@ def resolve_profile(
|
||||
|
||||
resolved[name] = Profile.fromDict(full)
|
||||
return resolved
|
||||
|
||||
|
||||
def resolve_profile_attr(
|
||||
profile_name: str,
|
||||
attr_name: str,
|
||||
profiles_sparse: dict[str, SparseProfile],
|
||||
) -> tuple[str, str]:
|
||||
"""
|
||||
This function tries to resolve a profile attribute recursively,
|
||||
and throws KeyError if the key is not found anywhere in the hierarchy.
|
||||
Throws a ProfileNotFoundException if the profile is not in profiles_sparse
|
||||
"""
|
||||
if profile_name not in profiles_sparse:
|
||||
raise ProfileNotFoundException(f"Unknown profile {profile_name}")
|
||||
profile: Profile = profiles_sparse[profile_name]
|
||||
if attr_name in profile:
|
||||
return profile[attr_name], profile_name
|
||||
|
||||
if 'parent' not in profile:
|
||||
raise KeyError(f'Profile attribute {attr_name} not found in {profile_name} and no parents')
|
||||
parent = profile
|
||||
parent_name = profile_name
|
||||
seen = []
|
||||
while True:
|
||||
if attr_name in parent:
|
||||
return parent[attr_name], parent_name
|
||||
|
||||
seen.append(parent_name)
|
||||
|
||||
if not parent.get('parent', None):
|
||||
raise KeyError(f'Profile attribute {attr_name} not found in inheritance chain, '
|
||||
f'we went down to {parent_name}.')
|
||||
parent_name = parent['parent']
|
||||
if parent_name in seen:
|
||||
raise RecursionError(f"Profile recursion loop: profile {profile_name} couldn't be resolved"
|
||||
f"because of a dependency loop:\n{' -> '.join([*seen, parent_name])}")
|
||||
parent = profiles_sparse[parent_name]
|
||||
|
||||
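
A small sketch of the parent-chain lookup performed by resolve_profile_attr (the profile names and values below are invented for illustration):

    # Hypothetical sparse profiles: 'travel' inherits from 'default'.
    profiles = {
        'default': SparseProfile.fromDict({'device': 'sdm845-oneplus-enchilada', 'flavour': 'phosh'}),
        'travel': SparseProfile.fromDict({'parent': 'default', 'flavour': 'barebone'}),
    }
    value, origin = resolve_profile_attr('travel', 'device', profiles)
    assert origin == 'default'  # 'device' is not set on 'travel', so it is inherited from the parent
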
127
config/scheme.py
127
config/scheme.py
@@ -1,13 +1,83 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional, Union, Mapping, Any, get_type_hints, get_origin, get_args, Iterable
|
||||
from munch import Munch
|
||||
from typing import Any, Optional, Mapping, Union
|
||||
|
||||
from dictscheme import DictScheme
|
||||
from constants import Arch
|
||||
|
||||
|
||||
class SparseProfile(DictScheme):
|
||||
def munchclass(*args, init=False, **kwargs):
|
||||
return dataclass(*args, init=init, slots=True, **kwargs)
|
||||
|
||||
|
||||
def resolve_type_hint(hint: type):
|
||||
origin = get_origin(hint)
|
||||
args: Iterable[type] = get_args(hint)
|
||||
if origin is Optional:
|
||||
args = set(list(args) + [type(None)])
|
||||
if origin in [Union, Optional]:
|
||||
results = []
|
||||
for arg in args:
|
||||
results += resolve_type_hint(arg)
|
||||
return results
|
||||
return [origin or hint]
|
||||
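
A quick sketch of what resolve_type_hint returns for a few typical hints (results noted as comments; Optional resolves through the Union branch):

    from typing import Optional, Union

    resolve_type_hint(int)              # -> [int]
    resolve_type_hint(Optional[str])    # -> [str, type(None)]
    resolve_type_hint(Union[str, int])  # -> [str, int]
    resolve_type_hint(dict[str, str])   # -> [dict]
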
|
||||
|
||||
class DataClass(Munch):
|
||||
|
||||
def __init__(self, d: dict = {}, validate: bool = True, **kwargs):
|
||||
self.update(d | kwargs, validate=validate)
|
||||
|
||||
@classmethod
|
||||
def transform(cls, values: Mapping[str, Any], validate: bool = True) -> Any:
|
||||
results = {}
|
||||
values = dict(values)
|
||||
for key in list(values.keys()):
|
||||
value = values.pop(key)
|
||||
type_hints = cls._type_hints
|
||||
if key in type_hints:
|
||||
_classes = tuple(resolve_type_hint(type_hints[key]))
|
||||
if issubclass(_classes[0], dict):
|
||||
assert isinstance(value, dict)
|
||||
target_class = _classes[0]
|
||||
if not issubclass(_classes[0], Munch):
|
||||
target_class = DataClass
|
||||
if not isinstance(value, target_class):
|
||||
value = target_class.fromDict(value, validate=validate)
|
||||
if validate:
|
||||
if not isinstance(value, _classes):
|
||||
raise Exception(f'key "{key}" has value of wrong type {_classes}: {value}')
|
||||
elif validate:
|
||||
raise Exception(f'Unknown key "{key}"')
|
||||
else:
|
||||
if isinstance(value, dict) and not isinstance(value, Munch):
|
||||
value = Munch.fromDict(value)
|
||||
results[key] = value
|
||||
if values:
|
||||
if validate:
|
||||
raise Exception(f'values contained unknown keys: {list(values.keys())}')
|
||||
results |= values
|
||||
|
||||
return results
|
||||
|
||||
@classmethod
|
||||
def fromDict(cls, values: Mapping[str, Any], validate: bool = True):
|
||||
return cls(**cls.transform(values, validate))
|
||||
|
||||
def update(self, d: Mapping[str, Any], validate: bool = True):
|
||||
Munch.update(self, type(self).transform(d, validate))
|
||||
|
||||
def __init_subclass__(cls):
|
||||
super().__init_subclass__()
|
||||
cls._type_hints = get_type_hints(cls)
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self)}{dict.__repr__(self.toDict())}'
|
||||
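
To illustrate the hint-driven validation above, a minimal hypothetical subclass (the class and its fields are made up, not part of kupferbootstrap):

    class ExampleSection(DataClass):
        name: str
        threads: int

    s = ExampleSection.fromDict({'name': 'demo', 'threads': 4})
    assert s.threads == 4
    # With validate=True, unknown keys or wrongly typed values raise:
    # ExampleSection.fromDict({'name': 'demo', 'bogus': 1})  -> Exception: Unknown key "bogus"
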
|
||||
|
||||
@munchclass()
|
||||
class SparseProfile(DataClass):
|
||||
parent: Optional[str]
|
||||
device: Optional[str]
|
||||
flavour: Optional[str]
|
||||
@@ -22,6 +92,7 @@ class SparseProfile(DictScheme):
|
||||
return f'{type(self)}{dict.__repr__(self.toDict())}'
|
||||
|
||||
|
||||
@munchclass()
|
||||
class Profile(SparseProfile):
|
||||
parent: Optional[str]
|
||||
device: str
|
||||
@@ -34,11 +105,13 @@ class Profile(SparseProfile):
|
||||
size_extra_mb: Union[str, int]
|
||||
|
||||
|
||||
class WrapperSection(DictScheme):
|
||||
@munchclass()
|
||||
class WrapperSection(DataClass):
|
||||
type: str # NOTE: rename to 'wrapper_type' if this causes problems
|
||||
|
||||
|
||||
class BuildSection(DictScheme):
|
||||
@munchclass()
|
||||
class BuildSection(DataClass):
|
||||
ccache: bool
|
||||
clean_mode: bool
|
||||
crosscompile: bool
|
||||
@@ -46,18 +119,21 @@ class BuildSection(DictScheme):
|
||||
threads: int
|
||||
|
||||
|
||||
class PkgbuildsSection(DictScheme):
|
||||
@munchclass()
|
||||
class PkgbuildsSection(DataClass):
|
||||
git_repo: str
|
||||
git_branch: str
|
||||
|
||||
|
||||
class PacmanSection(DictScheme):
|
||||
@munchclass()
|
||||
class PacmanSection(DataClass):
|
||||
parallel_downloads: int
|
||||
check_space: bool
|
||||
repo_branch: str
|
||||
|
||||
|
||||
class PathsSection(DictScheme):
|
||||
@munchclass()
|
||||
class PathsSection(DataClass):
|
||||
cache_dir: str
|
||||
chroots: str
|
||||
pacman: str
|
||||
@@ -65,23 +141,19 @@ class PathsSection(DictScheme):
|
||||
pkgbuilds: str
|
||||
jumpdrive: str
|
||||
images: str
|
||||
ccache: str
|
||||
rust: str
|
||||
|
||||
|
||||
class ProfilesSection(DictScheme):
|
||||
class ProfilesSection(DataClass):
|
||||
current: str
|
||||
default: SparseProfile
|
||||
|
||||
@classmethod
|
||||
def transform(cls, values: Mapping[str, Any], validate: bool = True, allow_extra: bool = True, type_hints: Optional[dict[str, Any]] = None):
|
||||
def transform(cls, values: Mapping[str, Any], validate: bool = True):
|
||||
results = {}
|
||||
for k, v in values.items():
|
||||
if k == 'current':
|
||||
results[k] = v
|
||||
continue
|
||||
if not allow_extra and k != 'default':
|
||||
raise Exception(f'Unknown key {k} in profiles section (Hint: extra_keys not allowed for some reason)')
|
||||
if not isinstance(v, dict):
|
||||
raise Exception(f'profile {v} is not a dict!')
|
||||
results[k] = SparseProfile.fromDict(v, validate=True)
|
||||
@@ -94,7 +166,8 @@ class ProfilesSection(DictScheme):
|
||||
return f'{type(self)}{dict.__repr__(self.toDict())}'
|
||||
|
||||
|
||||
class Config(DictScheme):
|
||||
@munchclass()
|
||||
class Config(DataClass):
|
||||
wrapper: WrapperSection
|
||||
build: BuildSection
|
||||
pkgbuilds: PkgbuildsSection
|
||||
@@ -103,13 +176,7 @@ class Config(DictScheme):
|
||||
profiles: ProfilesSection
|
||||
|
||||
@classmethod
|
||||
def fromDict(
|
||||
cls,
|
||||
values: Mapping[str, Any],
|
||||
validate: bool = True,
|
||||
allow_extra: bool = False,
|
||||
allow_incomplete: bool = False,
|
||||
):
|
||||
def fromDict(cls, values: Mapping[str, Any], validate: bool = True, allow_incomplete: bool = False):
|
||||
values = dict(values) # copy for later modification
|
||||
_vals = {}
|
||||
for name, _class in cls._type_hints.items():
|
||||
@@ -127,22 +194,20 @@ class Config(DictScheme):
|
||||
raise Exception(f'values contained unknown keys: {list(values.keys())}')
|
||||
_vals |= values
|
||||
|
||||
return Config(_vals, validate=validate)
|
||||
return Config(**_vals, validate=validate)
|
||||
|
||||
|
||||
class RuntimeConfiguration(DictScheme):
|
||||
@munchclass()
|
||||
class RuntimeConfiguration(DataClass):
|
||||
verbose: bool
|
||||
no_wrap: bool
|
||||
error_shell: bool
|
||||
config_file: Optional[str]
|
||||
script_source_dir: Optional[str]
|
||||
arch: Optional[Arch]
|
||||
uid: Optional[int]
|
||||
progress_bars: Optional[bool]
|
||||
colors: Optional[bool]
|
||||
no_wrap: bool
|
||||
script_source_dir: str
|
||||
error_shell: bool
|
||||
|
||||
|
||||
class ConfigLoadState(DictScheme):
|
||||
class ConfigLoadState(DataClass):
|
||||
load_finished: bool
|
||||
exception: Optional[Exception]
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ from typing import Mapping, Optional
|
||||
|
||||
from constants import DEFAULT_PACKAGE_BRANCH
|
||||
|
||||
from .scheme import Config, ConfigLoadState, DictScheme, Profile, RuntimeConfiguration
|
||||
from .scheme import Config, ConfigLoadState, DataClass, Profile, RuntimeConfiguration
|
||||
from .profile import PROFILE_DEFAULTS, PROFILE_DEFAULTS_DICT, resolve_profile
|
||||
|
||||
CONFIG_DIR = appdirs.user_config_dir('kupfer')
|
||||
@@ -42,8 +42,6 @@ CONFIG_DEFAULTS_DICT = {
|
||||
'pkgbuilds': os.path.join('%cache_dir%', 'pkgbuilds'),
|
||||
'jumpdrive': os.path.join('%cache_dir%', 'jumpdrive'),
|
||||
'images': os.path.join('%cache_dir%', 'images'),
|
||||
'ccache': os.path.join('%cache_dir%', 'ccache'),
|
||||
'rust': os.path.join('%cache_dir%', 'rust'),
|
||||
},
|
||||
'profiles': {
|
||||
'current': 'default',
|
||||
@@ -55,14 +53,11 @@ CONFIG_SECTIONS = list(CONFIG_DEFAULTS.keys())
|
||||
|
||||
CONFIG_RUNTIME_DEFAULTS: RuntimeConfiguration = RuntimeConfiguration.fromDict({
|
||||
'verbose': False,
|
||||
'no_wrap': False,
|
||||
'error_shell': False,
|
||||
'config_file': None,
|
||||
'script_source_dir': None,
|
||||
'arch': None,
|
||||
'uid': None,
|
||||
'progress_bars': None,
|
||||
'colors': None,
|
||||
'no_wrap': False,
|
||||
'script_source_dir': os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
|
||||
'error_shell': False,
|
||||
})
|
||||
|
||||
|
||||
@@ -86,7 +81,7 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau
|
||||
Pass `conf_base={}` to get a sanitized version of `conf_new`.
|
||||
NOTE: `conf_base` is NOT checked for invalid keys. Sanitize beforehand.
|
||||
"""
|
||||
parsed = deepcopy(dict(conf_base))
|
||||
parsed = deepcopy(conf_base)
|
||||
|
||||
for outer_name, outer_conf in deepcopy(conf_new).items():
|
||||
# only handle known config sections
|
||||
@@ -95,7 +90,7 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau
|
||||
continue
|
||||
logging.debug(f'Parsing config section "{outer_name}"')
|
||||
# check if outer_conf is a dict
|
||||
if not (isinstance(outer_conf, (dict, DictScheme))):
|
||||
if not (isinstance(outer_conf, (dict, DataClass))):
|
||||
parsed[outer_name] = outer_conf
|
||||
else:
|
||||
# init section
|
||||
@@ -115,7 +110,7 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau
|
||||
if profile_name == 'current':
|
||||
parsed[outer_name][profile_name] = profile_conf
|
||||
else:
|
||||
logging.warning(f'Skipped key "{profile_name}" in profile section: only subsections and "current" allowed')
|
||||
logging.warning('Skipped key "{profile_name}" in profile section: only subsections and "current" allowed')
|
||||
continue
|
||||
|
||||
# init profile
|
||||
@@ -136,7 +131,7 @@ def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defau
|
||||
# handle generic inner config dict
|
||||
for inner_name, inner_conf in outer_conf.items():
|
||||
if inner_name not in CONFIG_DEFAULTS[outer_name].keys():
|
||||
logging.warning(f'Skipped unknown config item "{inner_name}" in section "{outer_name}"')
|
||||
logging.warning(f'Skipped unknown config item "{inner_name}" in "{outer_name}"')
|
||||
continue
|
||||
parsed[outer_name][inner_name] = inner_conf
|
||||
|
||||
@@ -176,7 +171,7 @@ def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict:
|
||||
class ConfigLoadException(Exception):
|
||||
inner = None
|
||||
|
||||
def __init__(self, extra_msg='', inner_exception: Optional[Exception] = None):
|
||||
def __init__(self, extra_msg='', inner_exception: Exception = None):
|
||||
msg: list[str] = ['Config load failed!']
|
||||
if extra_msg:
|
||||
msg.append(extra_msg)
|
||||
@@ -192,27 +187,24 @@ class ConfigStateHolder:
|
||||
# runtime config not persisted anywhere
|
||||
runtime: RuntimeConfiguration
|
||||
file_state: ConfigLoadState
|
||||
_profile_cache: Optional[dict[str, Profile]]
|
||||
_profile_cache: dict[str, Profile]
|
||||
|
||||
def __init__(self, file_conf_path: Optional[str] = None, runtime_conf={}, file_conf_base: dict = {}):
|
||||
"""init a stateholder, optionally loading `file_conf_path`"""
|
||||
self.file = Config.fromDict(merge_configs(conf_new=file_conf_base, conf_base=CONFIG_DEFAULTS))
|
||||
self.file_state = ConfigLoadState()
|
||||
self.runtime = RuntimeConfiguration.fromDict(CONFIG_RUNTIME_DEFAULTS | runtime_conf)
|
||||
self.runtime.arch = os.uname().machine
|
||||
self.runtime.script_source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
||||
self.runtime.uid = os.getuid()
|
||||
self.runtime['arch'] = os.uname().machine
|
||||
self._profile_cache = {}
|
||||
if file_conf_path:
|
||||
self.try_load_file(file_conf_path)
|
||||
|
||||
def try_load_file(self, config_file=None, base=CONFIG_DEFAULTS_DICT):
|
||||
def try_load_file(self, config_file=None, base=CONFIG_DEFAULTS):
|
||||
config_file = config_file or CONFIG_DEFAULT_PATH
|
||||
self.runtime.config_file = config_file
|
||||
self.runtime['config_file'] = config_file
|
||||
self._profile_cache = None
|
||||
try:
|
||||
self.file = Config.fromDict(parse_file(config_file=config_file, base=base), validate=True)
|
||||
self.file_state.exception = None
|
||||
self.file = parse_file(config_file=config_file, base=base)
|
||||
except Exception as ex:
|
||||
self.file_state.exception = ex
|
||||
self.file_state.load_finished = True
|
||||
@@ -232,13 +224,11 @@ class ConfigStateHolder:
|
||||
raise ex
|
||||
|
||||
def get_profile(self, name: Optional[str] = None) -> Profile:
|
||||
name = name or self.file.profiles.current
|
||||
self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file.profiles, resolved=self._profile_cache)
|
||||
name = name or self.file['profiles']['current']
|
||||
self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file['profiles'], resolved=self._profile_cache)
|
||||
return self._profile_cache[name]
|
||||
|
||||
def _enforce_profile_field(self, field: str, profile_name: Optional[str] = None, hint_or_set_arch: bool = False) -> Profile:
|
||||
# TODO: device
|
||||
profile_name = profile_name if profile_name is not None else self.file.profiles.current
|
||||
def enforce_profile_device_set(self, profile_name: Optional[str] = None, hint_or_set_arch: bool = False) -> Profile:
|
||||
arch_hint = ''
|
||||
if not hint_or_set_arch:
|
||||
self.enforce_config_loaded()
|
||||
@@ -247,28 +237,22 @@ class ConfigStateHolder:
|
||||
'e.g. `kupferbootstrap packages build --arch x86_64`')
|
||||
if not self.is_loaded():
|
||||
if not self.file_state.exception:
|
||||
raise Exception(f'Error enforcing config profile {field}: config hadn\'t even been loaded yet.\n'
|
||||
raise Exception('Error enforcing/ config profile device: config hadnt even been loaded yet.\n'
|
||||
'This is a bug in kupferbootstrap!')
|
||||
raise Exception(f"Profile {field} couldn't be resolved because the config file couldn't be loaded.\n"
|
||||
raise Exception("Profile device couldn't be resolved because the config file couldn't be loaded.\n"
|
||||
"If the config doesn't exist, try running `kupferbootstrap config init`.\n"
|
||||
f"Error: {self.file_state.exception}")
|
||||
if profile_name and profile_name not in self.file.profiles:
|
||||
raise Exception(f'Unknown profile "{profile_name}". Please run `kupferbootstrap config profile init`{arch_hint}')
|
||||
profile = self.get_profile(profile_name)
|
||||
if field not in profile or not profile[field]:
|
||||
m = (f'Profile "{profile_name}" has no {field.upper()} configured.\n'
|
||||
f'Please run `kupferbootstrap config profile init {profile_name}`{arch_hint}')
|
||||
if not profile.device:
|
||||
m = (f'Profile "{profile_name}" has no device configured.\n'
|
||||
f'Please run `kupferbootstrap config profile init device`{arch_hint}')
|
||||
raise Exception(m)
|
||||
return profile
|
||||
|
||||
def enforce_profile_device_set(self, **kwargs) -> Profile:
|
||||
return self._enforce_profile_field(field='device', **kwargs)
|
||||
|
||||
def enforce_profile_flavour_set(self, **kwargs) -> Profile:
|
||||
return self._enforce_profile_field(field='flavour', **kwargs)
|
||||
|
||||
def get_path(self, path_name: str) -> str:
|
||||
paths = self.file.paths
|
||||
paths = self.file['paths']
|
||||
return resolve_path_template(paths[path_name], paths)
|
||||
|
||||
def get_package_dir(self, arch: str):
|
||||
@@ -281,12 +265,10 @@ class ConfigStateHolder:
|
||||
def write(self, path=None):
|
||||
"""write toml representation of `self.file` to `path`"""
|
||||
if path is None:
|
||||
path = self.runtime.config_file
|
||||
assert path
|
||||
path = self.runtime['config_file']
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
new = not os.path.exists(path)
|
||||
dump_file(path, self.file)
|
||||
logging.info(f'{"Created" if new else "Written changes to"} config file at {path}')
|
||||
logging.info(f'Created config file at {path}')
|
||||
|
||||
def invalidate_profile_cache(self):
|
||||
"""Clear the profile cache (usually after modification)"""
|
||||
@@ -295,20 +277,20 @@ class ConfigStateHolder:
|
||||
def update(self, config_fragment: dict[str, dict], warn_missing_defaultprofile: bool = True) -> bool:
|
||||
"""Update `self.file` with `config_fragment`. Returns `True` if the config was changed"""
|
||||
merged = merge_configs(config_fragment, conf_base=self.file, warn_missing_defaultprofile=warn_missing_defaultprofile)
|
||||
changed = self.file.toDict() != merged
|
||||
changed = self.file != merged
|
||||
self.file.update(merged)
|
||||
if changed and 'profiles' in config_fragment and self.file.profiles.toDict() != config_fragment['profiles']:
|
||||
if changed and 'profiles' in config_fragment and self.file['profiles'] != config_fragment['profiles']:
|
||||
self.invalidate_profile_cache()
|
||||
return changed
|
||||
|
||||
def update_profile(self, name: str, profile: Profile, merge: bool = False, create: bool = True, prune: bool = True):
|
||||
new = {}
|
||||
if name not in self.file.profiles:
|
||||
if name not in self.file['profiles']:
|
||||
if not create:
|
||||
raise Exception(f'Unknown profile: {name}')
|
||||
else:
|
||||
if merge:
|
||||
new = deepcopy(self.file.profiles[name])
|
||||
new = deepcopy(self.file['profiles'][name])
|
||||
|
||||
logging.debug(f'new: {new}')
|
||||
logging.debug(f'profile: {profile}')
|
||||
@@ -316,8 +298,5 @@ class ConfigStateHolder:
|
||||
|
||||
if prune:
|
||||
new = {key: val for key, val in new.items() if val is not None}
|
||||
self.file.profiles[name] = new
|
||||
self.file['profiles'][name] = new
|
||||
self.invalidate_profile_cache()
|
||||
|
||||
|
||||
config: ConfigStateHolder = ConfigStateHolder(file_conf_base=CONFIG_DEFAULTS)
|
||||
|
||||
@@ -5,7 +5,7 @@ import pickle
|
||||
import toml
|
||||
|
||||
from tempfile import mktemp, gettempdir as get_system_tempdir
|
||||
from typing import Any, Optional
|
||||
from typing import Optional
|
||||
|
||||
from config.profile import PROFILE_DEFAULTS
|
||||
from config.scheme import Config, Profile
|
||||
@@ -53,7 +53,7 @@ def validate_ConfigStateHolder(c: ConfigStateHolder, should_load: Optional[bool]
|
||||
def test_fixture_configstate(conf_fixture: str, exists: bool, request):
|
||||
configstate = request.getfixturevalue(conf_fixture)
|
||||
assert 'config_file' in configstate.runtime
|
||||
confpath = configstate.runtime.config_file
|
||||
confpath = configstate.runtime['config_file']
|
||||
assert isinstance(confpath, str)
|
||||
assert confpath
|
||||
assert exists == os.path.exists(confpath)
|
||||
@@ -124,13 +124,12 @@ def load_toml_file(path) -> dict:
|
||||
|
||||
|
||||
def get_path_from_stateholder(c: ConfigStateHolder):
|
||||
return c.runtime.config_file
|
||||
return c.runtime['config_file']
|
||||
|
||||
|
||||
def test_config_save_nonexistant(configstate_nonexistant: ConfigStateHolder):
|
||||
c = configstate_nonexistant
|
||||
confpath = c.runtime.config_file
|
||||
assert confpath
|
||||
confpath = c.runtime['config_file']
|
||||
assert not os.path.exists(confpath)
|
||||
c.write()
|
||||
assert confpath
|
||||
@@ -154,16 +153,8 @@ def test_config_save_modified(configstate_emptyfile: ConfigStateHolder):
|
||||
compare_to_defaults(load_toml_file(get_path_from_stateholder(c)), defaults_modified)
|
||||
|
||||
|
||||
def get_config_scheme(data: dict[str, Any], validate=True, allow_incomplete=False) -> Config:
|
||||
"""
|
||||
helper func to ignore a false type error.
|
||||
for some reason, mypy argues about DictScheme.fromDict() instead of Config.fromDict() here
|
||||
"""
|
||||
return Config.fromDict(data, validate=validate, allow_incomplete=allow_incomplete) # type: ignore[call-arg]
|
||||
|
||||
|
||||
def test_config_scheme_defaults():
|
||||
c = get_config_scheme(CONFIG_DEFAULTS, validate=True, allow_incomplete=False)
|
||||
c = Config.fromDict(CONFIG_DEFAULTS, validate=True, allow_incomplete=False)
|
||||
assert c
|
||||
compare_to_defaults(c)
|
||||
|
||||
@@ -172,7 +163,7 @@ def test_config_scheme_modified():
|
||||
modifications = {'wrapper': {'type': 'none'}, 'build': {'crossdirect': False}}
|
||||
assert set(modifications.keys()).issubset(CONFIG_DEFAULTS.keys())
|
||||
d = {section_name: (section | modifications.get(section_name, {})) for section_name, section in CONFIG_DEFAULTS.items()}
|
||||
c = get_config_scheme(d, validate=True, allow_incomplete=False)
|
||||
c = Config.fromDict(d, validate=True, allow_incomplete=False)
|
||||
assert c
|
||||
assert c.build.crossdirect is False
|
||||
assert c.wrapper.type == 'none'
|
||||
|
||||
96
constants.py
96
constants.py
@@ -1,9 +1,10 @@
|
||||
from typehelpers import TypeAlias
|
||||
from typing_extensions import TypeAlias
|
||||
from typing import TypedDict
|
||||
|
||||
FASTBOOT = 'fastboot'
|
||||
FLASH_PARTS = {
|
||||
'FULL': 'full',
|
||||
'ABOOT': 'abootimg',
|
||||
'ROOTFS': 'rootfs',
|
||||
'ABOOT': 'aboot',
|
||||
'LK2ND': 'lk2nd',
|
||||
'QHYPSTUB': 'qhypstub',
|
||||
}
|
||||
@@ -14,22 +15,64 @@ LOCATIONS = [EMMC, MICROSD]
|
||||
JUMPDRIVE = 'jumpdrive'
|
||||
JUMPDRIVE_VERSION = '0.8'
|
||||
|
||||
BASE_LOCAL_PACKAGES: list[str] = [
|
||||
'base-kupfer',
|
||||
]
|
||||
BOOT_STRATEGIES: dict[str, str] = {
|
||||
'oneplus-enchilada': FASTBOOT,
|
||||
'oneplus-fajita': FASTBOOT,
|
||||
'xiaomi-beryllium-ebbg': FASTBOOT,
|
||||
'xiaomi-beryllium-tianma': FASTBOOT,
|
||||
'bq-paella': FASTBOOT,
|
||||
}
|
||||
|
||||
BASE_PACKAGES: list[str] = BASE_LOCAL_PACKAGES + [
|
||||
DEVICES: dict[str, list[str]] = {
|
||||
'oneplus-enchilada': ['device-sdm845-oneplus-enchilada'],
|
||||
'oneplus-fajita': ['device-sdm845-oneplus-fajita'],
|
||||
'xiaomi-beryllium-ebbg': ['device-sdm845-xiaomi-beryllium-ebbg'],
|
||||
'xiaomi-beryllium-tianma': ['device-sdm845-xiaomi-beryllium-tianma'],
|
||||
'bq-paella': ['device-msm8916-bq-paella'],
|
||||
}
|
||||
|
||||
BASE_PACKAGES: list[str] = [
|
||||
'base',
|
||||
'base-kupfer',
|
||||
'nano',
|
||||
'vim',
|
||||
]
|
||||
|
||||
POST_INSTALL_CMDS = [
|
||||
'kupfer-config apply',
|
||||
'kupfer-config --user apply',
|
||||
]
|
||||
|
||||
REPOS_CONFIG_FILE = "repos.yml"
|
||||
class Flavour(TypedDict, total=False):
|
||||
packages: list[str]
|
||||
post_cmds: list[str]
|
||||
size: int
|
||||
|
||||
|
||||
FLAVOURS: dict[str, Flavour] = {
|
||||
'barebone': {
|
||||
'packages': [],
|
||||
},
|
||||
'debug-shell': {
|
||||
'packages': ['hook-debug-shell'],
|
||||
},
|
||||
'gnome': {
|
||||
'packages': ['gnome', 'archlinux-appstream-data', 'gnome-software-packagekit-plugin'],
|
||||
'post_cmds': ['systemctl enable gdm'],
|
||||
'size': 8,
|
||||
},
|
||||
'phosh': {
|
||||
'packages': [
|
||||
'phosh',
|
||||
'phosh-osk-stub', # temporary replacement for 'squeekboard',
|
||||
'gnome-control-center',
|
||||
'gnome-software',
|
||||
'gnome-software-packagekit-plugin',
|
||||
'archlinux-appstream-data',
|
||||
'gnome-initial-setup',
|
||||
'kgx',
|
||||
'iio-sensor-proxy',
|
||||
],
|
||||
'post_cmds': ['systemctl enable phosh'],
|
||||
'size': 5,
|
||||
}
|
||||
}
|
||||
|
||||
REPOSITORIES = [
|
||||
'boot',
|
||||
@@ -42,9 +85,7 @@ REPOSITORIES = [
|
||||
]
|
||||
|
||||
DEFAULT_PACKAGE_BRANCH = 'dev'
|
||||
KUPFER_BRANCH_MARKER = '%kupfer_branch%'
|
||||
KUPFER_HTTPS_BASE = f'https://gitlab.com/kupfer/packages/prebuilts/-/raw/{KUPFER_BRANCH_MARKER}'
|
||||
KUPFER_HTTPS = KUPFER_HTTPS_BASE + '/$arch/$repo'
|
||||
KUPFER_HTTPS = 'https://gitlab.com/kupfer/packages/prebuilts/-/raw/%branch%/$arch/$repo'
|
||||
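
A hedged sketch of how such a mirror template is typically expanded: the branch placeholder is assumed to be substituted by kupferbootstrap elsewhere in the codebase, while $arch and $repo are left for pacman to fill in per repository:

    url = KUPFER_HTTPS.replace('%branch%', 'dev')
    # -> 'https://gitlab.com/kupfer/packages/prebuilts/-/raw/dev/$arch/$repo'
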
|
||||
Arch: TypeAlias = str
|
||||
ARCHES = [
|
||||
@@ -67,9 +108,9 @@ ALARM_REPOS = {
|
||||
BASE_DISTROS: dict[DistroArch, dict[str, dict[str, str]]] = {
|
||||
'x86_64': {
|
||||
'repos': {
|
||||
'core': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch',
|
||||
'extra': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch',
|
||||
'community': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch',
|
||||
'core': 'http://ftp.halifax.rwth-aachen.de/archlinux/$repo/os/$arch',
|
||||
'extra': 'http://ftp.halifax.rwth-aachen.de/archlinux/$repo/os/$arch',
|
||||
'community': 'http://ftp.halifax.rwth-aachen.de/archlinux/$repo/os/$arch',
|
||||
},
|
||||
},
|
||||
'aarch64': {
|
||||
@@ -89,7 +130,7 @@ COMPILE_ARCHES: dict[Arch, str] = {
|
||||
GCC_HOSTSPECS: dict[DistroArch, dict[TargetArch, str]] = {
|
||||
'x86_64': {
|
||||
'x86_64': 'x86_64-pc-linux-gnu',
|
||||
'aarch64': 'aarch64-unknown-linux-gnu',
|
||||
'aarch64': 'aarch64-linux-gnu',
|
||||
'armv7h': 'arm-unknown-linux-gnueabihf'
|
||||
},
|
||||
'aarch64': {
|
||||
@@ -142,10 +183,10 @@ SSH_COMMON_OPTIONS = [
|
||||
]
|
||||
|
||||
CHROOT_PATHS = {
|
||||
'chroots': '/chroots',
|
||||
'chroots': '/chroot',
|
||||
'jumpdrive': '/var/cache/jumpdrive',
|
||||
'pacman': '/pacman',
|
||||
'packages': '/packages',
|
||||
'pacman': '/var/cache/pacman',
|
||||
'packages': '/prebuilts',
|
||||
'pkgbuilds': '/pkgbuilds',
|
||||
'images': '/images',
|
||||
}
|
||||
@@ -154,7 +195,6 @@ WRAPPER_TYPES = [
|
||||
'none',
|
||||
'docker',
|
||||
]
|
||||
WRAPPER_ENV_VAR = 'KUPFERBOOTSTRAP_WRAPPED'
|
||||
|
||||
MAKEPKG_CMD = [
|
||||
'makepkg',
|
||||
@@ -162,13 +202,3 @@ MAKEPKG_CMD = [
|
||||
'--ignorearch',
|
||||
'--needed',
|
||||
]
|
||||
|
||||
SRCINFO_FILE = '.SRCINFO'
|
||||
SRCINFO_METADATA_FILE = '.srcinfo_meta.json'
|
||||
SRCINFO_INITIALISED_FILE = ".srcinfo_initialised.json"
|
||||
|
||||
SRCINFO_TARBALL_FILE = "srcinfos.tar.gz"
|
||||
SRCINFO_TARBALL_URL = f'{KUPFER_HTTPS_BASE}/{SRCINFO_TARBALL_FILE}'
|
||||
|
||||
FLAVOUR_INFO_FILE = 'flavourinfo.json'
|
||||
FLAVOUR_DESCRIPTION_PREFIX = 'kupfer flavour:'
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
import click
|
||||
import logging
|
||||
|
||||
from json import dumps as json_dump
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from config.cli import resolve_profile_field
|
||||
from utils import color_mark_selected, colors_supported
|
||||
|
||||
from .device import get_devices, get_device
|
||||
|
||||
|
||||
@click.command(name='devices')
|
||||
@click.option('-j', '--json', is_flag=True, help='output machine-parsable JSON format')
|
||||
@click.option(
|
||||
'--force-parse-deviceinfo/--no-parse-deviceinfo',
|
||||
is_flag=True,
|
||||
default=None,
|
||||
help="Force or disable deviceinfo parsing. The default is to try but continue if it fails.",
|
||||
)
|
||||
@click.option(
|
||||
'--download-packages/--no-download-packages',
|
||||
is_flag=True,
|
||||
default=False,
|
||||
help='Download packages while trying to parse deviceinfo',
|
||||
)
|
||||
@click.option('--output-file', type=click.Path(exists=False, file_okay=True), help="Dump JSON to file")
|
||||
def cmd_devices(
|
||||
json: bool = False,
|
||||
force_parse_deviceinfo: Optional[bool] = True,
|
||||
download_packages: bool = False,
|
||||
output_file: Optional[str] = None,
|
||||
):
|
||||
'list the available devices and descriptions'
|
||||
devices = get_devices()
|
||||
if not devices:
|
||||
raise Exception("No devices found!")
|
||||
profile_device = None
|
||||
profile_name = config.file.profiles.current
|
||||
selected, inherited_from = None, None
|
||||
try:
|
||||
selected, inherited_from = resolve_profile_field(None, profile_name, 'device', config.file.profiles)
|
||||
if selected:
|
||||
profile_device = get_device(selected)
|
||||
except Exception as ex:
|
||||
logging.debug(f"Failed to get profile device for marking as currently selected, continuing anyway. Exception: {ex}")
|
||||
output = ['']
|
||||
json_output = {}
|
||||
interactive_json = json and not output_file
|
||||
if output_file:
|
||||
json = True
|
||||
use_colors = colors_supported(False if interactive_json else config.runtime.colors)
|
||||
for name in sorted(devices.keys()):
|
||||
device = devices[name]
|
||||
assert device
|
||||
if force_parse_deviceinfo in [None, True]:
|
||||
try:
|
||||
device.parse_deviceinfo(try_download=download_packages)
|
||||
except Exception as ex:
|
||||
if not force_parse_deviceinfo:
|
||||
logging.debug(f"Failed to parse deviceinfo for extended description, not a problem: {ex}")
|
||||
else:
|
||||
raise ex
|
||||
|
||||
if json:
|
||||
json_output[name] = device.get_summary().toDict()
|
||||
if interactive_json:
|
||||
continue
|
||||
snippet = device.nice_str(colors=use_colors, newlines=True)
|
||||
if profile_device and profile_device.name == device.name:
|
||||
snippet = color_mark_selected(snippet, profile_name or '[unknown]', inherited_from)
|
||||
output.append(f"{snippet}\n")
|
||||
if interactive_json:
|
||||
output = ['\n' + json_dump(json_output, indent=4)]
|
||||
if output_file:
|
||||
with open(output_file, 'w') as fd:
|
||||
fd.write(json_dump(json_output))
|
||||
for line in output:
|
||||
print(line)
|
||||
@@ -1,209 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from constants import Arch, ARCHES
|
||||
from dictscheme import DictScheme
|
||||
from distro.distro import get_kupfer_local
|
||||
from distro.package import LocalPackage
|
||||
from packages.pkgbuild import Pkgbuild, _pkgbuilds_cache, discover_pkgbuilds, get_pkgbuild_by_path, init_pkgbuilds
|
||||
from utils import read_files_from_tar, color_str
|
||||
|
||||
from .deviceinfo import DEFAULT_IMAGE_SECTOR_SIZE, DeviceInfo, parse_deviceinfo
|
||||
|
||||
DEVICE_DEPRECATIONS = {
|
||||
"oneplus-enchilada": "sdm845-oneplus-enchilada",
|
||||
"oneplus-fajita": "sdm845-oneplus-fajita",
|
||||
"xiaomi-beryllium-ebbg": "sdm845-xiaomi-beryllium-ebbg",
|
||||
"xiaomi-beryllium-tianma": "sdm845-xiaomi-beryllium-tianma",
|
||||
"bq-paella": "msm8916-bq-paella",
|
||||
}
|
||||
|
||||
|
||||
class DeviceSummary(DictScheme):
|
||||
name: str
|
||||
description: str
|
||||
arch: str
|
||||
package_name: Optional[str]
|
||||
package_path: Optional[str]
|
||||
|
||||
def nice_str(self, newlines: bool = False, colors: bool = False) -> str:
|
||||
separator = '\n' if newlines else ', '
|
||||
assert bool(self.package_path) == bool(self.package_name)
|
||||
package_path = {"Package Path": self.package_path} if self.package_path else {}
|
||||
fields = {
|
||||
"Device": self.name,
|
||||
"Description": self.description or f"[no package {'description' if self.package_name else 'associated (?!)'} and deviceinfo not parsed]",
|
||||
"Architecture": self.arch,
|
||||
"Package Name": self.package_name or "no package associated. PROBABLY A BUG!",
|
||||
**package_path,
|
||||
}
|
||||
return separator.join([f"{color_str(name, bold=True, use_colors=colors)}: {value}" for name, value in fields.items()])
|
||||
|
||||
|
||||
class Device(DictScheme):
|
||||
name: str
|
||||
arch: Arch
|
||||
package: Pkgbuild
|
||||
deviceinfo: Optional[DeviceInfo]
|
||||
|
||||
def __repr__(self):
|
||||
return f'Device<{self.name},{self.arch},{self.package.path if self.package else "[no package]"}>'
|
||||
|
||||
def __str__(self):
|
||||
return self.nice_str(newlines=True)
|
||||
|
||||
def nice_str(self, *args, **kwargs) -> str:
|
||||
return self.get_summary().nice_str(*args, **kwargs)
|
||||
|
||||
def get_summary(self) -> DeviceSummary:
|
||||
result: dict[str, Optional[str]] = {}
|
||||
description = ((self.package.description if self.package else "").strip() or
|
||||
(self.deviceinfo.get("name", "[No name in deviceinfo]") if self.deviceinfo else "")).strip()
|
||||
result["name"] = self.name
|
||||
result["description"] = description
|
||||
result["arch"] = self.arch
|
||||
result["package_name"] = self.package.name if self.package else None
|
||||
result["package_path"] = self.package.path if self.package else None
|
||||
return DeviceSummary(result)
|
||||
|
||||
def parse_deviceinfo(self, try_download: bool = True, lazy: bool = True) -> DeviceInfo:
|
||||
if not lazy or 'deviceinfo' not in self or self.deviceinfo is None:
|
||||
# avoid import loop
|
||||
from packages.build import check_package_version_built
|
||||
is_built = check_package_version_built(self.package, self.arch, try_download=try_download)
|
||||
if not is_built:
|
||||
raise Exception(f"device package {self.package.name} for device {self.name} couldn't be acquired!")
|
||||
pkgs: dict[str, LocalPackage] = get_kupfer_local(arch=self.arch, in_chroot=False, scan=True).get_packages()
|
||||
if self.package.name not in pkgs:
|
||||
raise Exception(f"device package {self.package.name} somehow not in repos, this is a kupferbootstrap bug")
|
||||
pkg = pkgs[self.package.name]
|
||||
file_path = pkg.acquire()
|
||||
assert file_path
|
||||
assert os.path.exists(file_path)
|
||||
deviceinfo_path = 'etc/kupfer/deviceinfo'
|
||||
for path, f in read_files_from_tar(file_path, [deviceinfo_path]):
|
||||
if path != deviceinfo_path:
|
||||
raise Exception(f'Somehow, we got a wrong file: expected: "{deviceinfo_path}", got: "{path}"')
|
||||
with f as fd:
|
||||
lines = fd.readlines()
|
||||
assert lines
|
||||
if lines and isinstance(lines[0], bytes):
|
||||
lines = [line.decode() for line in lines]
|
||||
info = parse_deviceinfo(lines, self.name)
|
||||
assert info.arch
|
||||
assert info.arch == self.arch
|
||||
self['deviceinfo'] = info
|
||||
assert self.deviceinfo
|
||||
return self.deviceinfo
|
||||
|
||||
def get_image_sectorsize(self, **kwargs) -> Optional[int]:
|
||||
"""Gets the deviceinfo_rootfs_image_sector_size if defined, otherwise None"""
|
||||
return self.parse_deviceinfo(**kwargs).get('rootfs_image_sector_size', None)
|
||||
|
||||
def get_image_sectorsize_default(self, **kwargs) -> int:
|
||||
return self.get_image_sectorsize(**kwargs) or DEFAULT_IMAGE_SECTOR_SIZE
|
||||
|
||||
|
||||
def check_devicepkg_name(name: str, log_level: Optional[int] = None):
|
||||
valid = True
|
||||
if not name.startswith('device-'):
|
||||
valid = False
|
||||
if log_level is not None:
|
||||
logging.log(log_level, f'invalid device package name "{name}": doesn\'t start with "device-"')
|
||||
if name.endswith('-common'):
|
||||
valid = False
|
||||
if log_level is not None:
|
||||
logging.log(log_level, f'invalid device package name "{name}": ends with "-common"')
|
||||
return valid
|
||||
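
A brief sketch of the naming rule this check enforces (device package names must carry the 'device-' prefix and must not be '-common' split packages):

    assert check_devicepkg_name('device-sdm845-oneplus-enchilada') is True
    assert check_devicepkg_name('device-sdm845-common') is False      # '-common' packages are rejected
    assert check_devicepkg_name('sdm845-oneplus-enchilada') is False  # missing the 'device-' prefix
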
|
||||
|
||||
def parse_device_pkg(pkgbuild: Pkgbuild) -> Device:
|
||||
if len(pkgbuild.arches) != 1:
|
||||
raise Exception(f"{pkgbuild.name}: Device package must have exactly one arch, but has {pkgbuild.arches}")
|
||||
arch = pkgbuild.arches[0]
|
||||
if arch == 'any' or arch not in ARCHES:
|
||||
raise Exception(f'unknown arch for device package: {arch}')
|
||||
if pkgbuild.repo != 'device':
|
||||
logging.warning(f'device package {pkgbuild.name} is in unexpected repo "{pkgbuild.repo}", expected "device"')
|
||||
name = pkgbuild.name
|
||||
prefix = 'device-'
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix):]
|
||||
return Device(name=name, arch=arch, package=pkgbuild, deviceinfo=None)
|
||||
|
||||
|
||||
def sanitize_device_name(name: str, warn: bool = True) -> str:
|
||||
if name not in DEVICE_DEPRECATIONS:
|
||||
return name
|
||||
warning = f"Deprecated device {name}"
|
||||
replacement = DEVICE_DEPRECATIONS[name]
|
||||
if replacement:
|
||||
warning += (f': Device has been renamed to {replacement}! Please adjust your profile config!\n'
|
||||
'This will become an error in a future version!')
|
||||
name = replacement
|
||||
if warn:
|
||||
logging.warning(warning)
|
||||
return name
|
||||
|
||||
|
||||
_device_cache: dict[str, Device] = {}
|
||||
_device_cache_populated: bool = False
|
||||
|
||||
|
||||
def get_devices(pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True) -> dict[str, Device]:
|
||||
global _device_cache, _device_cache_populated
|
||||
use_cache = _device_cache_populated and lazy
|
||||
if not use_cache:
|
||||
logging.info("Searching PKGBUILDs for device packages")
|
||||
if not pkgbuilds:
|
||||
pkgbuilds = discover_pkgbuilds(lazy=lazy, repositories=['device'])
|
||||
_device_cache.clear()
|
||||
for pkgbuild in pkgbuilds.values():
|
||||
if not (pkgbuild.repo == 'device' and check_devicepkg_name(pkgbuild.name, log_level=None)):
|
||||
continue
|
||||
dev = parse_device_pkg(pkgbuild)
|
||||
_device_cache[dev.name] = dev
|
||||
_device_cache_populated = True
|
||||
return _device_cache.copy()
|
||||
|
||||
|
||||
def get_device(name: str, pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, scan_all=False) -> Device:
|
||||
global _device_cache, _device_cache_populated
|
||||
assert lazy or pkgbuilds
|
||||
name = sanitize_device_name(name)
|
||||
if lazy and name in _device_cache:
|
||||
return _device_cache[name]
|
||||
if scan_all:
|
||||
devices = get_devices(pkgbuilds=pkgbuilds, lazy=lazy)
|
||||
if name not in devices:
|
||||
raise Exception(f'Unknown device {name}!\n'
|
||||
f'Available: {list(devices.keys())}')
|
||||
return devices[name]
|
||||
else:
|
||||
pkgname = f'device-{name}'
|
||||
if pkgbuilds:
|
||||
if pkgname not in pkgbuilds:
|
||||
raise Exception(f'Unknown device {name}!')
|
||||
pkgbuild = pkgbuilds[pkgname]
|
||||
else:
|
||||
if lazy and pkgname in _pkgbuilds_cache:
|
||||
pkgbuild = _pkgbuilds_cache[pkgname]
|
||||
else:
|
||||
init_pkgbuilds()
|
||||
relative_path = os.path.join('device', pkgname)
|
||||
if not os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_path)):
|
||||
logging.debug(f'Exact device pkgbuild path "pkgbuilds/{relative_path}" doesn\'t exist, scanning entire repo')
|
||||
return get_device(name, pkgbuilds=pkgbuilds, lazy=lazy, scan_all=True)
|
||||
pkgbuild = [p for p in get_pkgbuild_by_path(relative_path, lazy=lazy) if p.name == pkgname][0]
|
||||
device = parse_device_pkg(pkgbuild)
|
||||
if lazy:
|
||||
_device_cache[name] = device
|
||||
return device
|
||||
|
||||
|
||||
def get_profile_device(profile_name: Optional[str] = None, hint_or_set_arch: bool = False):
|
||||
profile = config.enforce_profile_device_set(profile_name=profile_name, hint_or_set_arch=hint_or_set_arch)
|
||||
return get_device(profile.device)
|
||||
@@ -1,273 +0,0 @@
|
||||
# Copyright 2022 Oliver Smith
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
# Taken from postmarketOS/pmbootstrap, modified for kupferbootstrap by Prawn
|
||||
import copy
|
||||
import logging
|
||||
import os
|
||||
|
||||
from typing import Mapping, Optional
|
||||
|
||||
from config.state import config
|
||||
from constants import Arch
|
||||
from dictscheme import DictScheme
|
||||
|
||||
PMOS_ARCHES_OVERRIDES: dict[str, Arch] = {
|
||||
"armv7": 'armv7h',
|
||||
}
|
||||
|
||||
DEFAULT_IMAGE_SECTOR_SIZE = 512
|
||||
|
||||
|
||||
class DeviceInfo(DictScheme):
|
||||
arch: Arch
|
||||
name: str
|
||||
manufacturer: str
|
||||
codename: str
|
||||
chassis: str
|
||||
flash_pagesize: int
|
||||
flash_method: str
|
||||
rootfs_image_sector_size: Optional[int]
|
||||
|
||||
@classmethod
|
||||
def transform(cls, values: Mapping[str, Optional[str]], **kwargs):
|
||||
kwargs = {'allow_extra': True} | kwargs
|
||||
return super().transform(values, **kwargs)
|
||||
|
||||
|
||||
# Variables from deviceinfo. Reference: <https://postmarketos.org/deviceinfo>
|
||||
deviceinfo_attributes = [
|
||||
# general
|
||||
"format_version",
|
||||
"name",
|
||||
"manufacturer",
|
||||
"codename",
|
||||
"year",
|
||||
"dtb",
|
||||
"modules_initfs",
|
||||
"arch",
|
||||
|
||||
# device
|
||||
"chassis",
|
||||
"keyboard",
|
||||
"external_storage",
|
||||
"screen_width",
|
||||
"screen_height",
|
||||
"dev_touchscreen",
|
||||
"dev_touchscreen_calibration",
|
||||
"append_dtb",
|
||||
|
||||
# bootloader
|
||||
"flash_method",
|
||||
"boot_filesystem",
|
||||
|
||||
# flash
|
||||
"flash_heimdall_partition_kernel",
|
||||
"flash_heimdall_partition_initfs",
|
||||
"flash_heimdall_partition_system",
|
||||
"flash_heimdall_partition_vbmeta",
|
||||
"flash_heimdall_partition_dtbo",
|
||||
"flash_fastboot_partition_kernel",
|
||||
"flash_fastboot_partition_system",
|
||||
"flash_fastboot_partition_vbmeta",
|
||||
"flash_fastboot_partition_dtbo",
|
||||
"generate_legacy_uboot_initfs",
|
||||
"kernel_cmdline",
|
||||
"generate_bootimg",
|
||||
"bootimg_qcdt",
|
||||
"bootimg_mtk_mkimage",
|
||||
"bootimg_dtb_second",
|
||||
"flash_offset_base",
|
||||
"flash_offset_kernel",
|
||||
"flash_offset_ramdisk",
|
||||
"flash_offset_second",
|
||||
"flash_offset_tags",
|
||||
"flash_pagesize",
|
||||
"flash_fastboot_max_size",
|
||||
"flash_sparse",
|
||||
"flash_sparse_samsung_format",
|
||||
"rootfs_image_sector_size",
|
||||
"sd_embed_firmware",
|
||||
"sd_embed_firmware_step_size",
|
||||
"partition_blacklist",
|
||||
"boot_part_start",
|
||||
"partition_type",
|
||||
"root_filesystem",
|
||||
"flash_kernel_on_update",
|
||||
"cgpt_kpart",
|
||||
"cgpt_kpart_start",
|
||||
"cgpt_kpart_size",
|
||||
|
||||
# weston
|
||||
"weston_pixman_type",
|
||||
|
||||
# keymaps
|
||||
"keymaps",
|
||||
]
|
||||
|
||||
# Valid types for the 'chassis' attribute in deviceinfo
|
||||
# See https://www.freedesktop.org/software/systemd/man/machine-info.html
|
||||
deviceinfo_chassis_types = [
|
||||
"desktop",
|
||||
"laptop",
|
||||
"convertible",
|
||||
"server",
|
||||
"tablet",
|
||||
"handset",
|
||||
"watch",
|
||||
"embedded",
|
||||
"vm",
|
||||
]
|
||||
|
||||
|
||||
def sanity_check(deviceinfo: dict[str, Optional[str]], device_name: str):
|
||||
try:
|
||||
_pmos_sanity_check(deviceinfo, device_name)
|
||||
except RuntimeError as err:
|
||||
raise Exception(f"{device_name}: The postmarketOS checker for deviceinfo files has run into an issue.\n"
|
||||
"Here at kupfer, we usually don't maintain our own deviceinfo files "
|
||||
"and instead often download them postmarketOS in our PKGBUILDs.\n"
|
||||
"Please make sure your PKGBUILDs.git is up to date. (run `kupferbootstrap packages update`)\n"
|
||||
"If the problem persists, please open an issue for this device's deviceinfo file "
|
||||
"in the kupfer pkgbuilds git repo on Gitlab.\n\n"
|
||||
"postmarketOS error message (referenced file may not exist until you run makepkg in that directory):\n"
|
||||
f"{err}")
|
||||
|
||||
|
||||
def _pmos_sanity_check(info: dict[str, Optional[str]], device_name: str):
|
||||
# Resolve path for more readable error messages
|
||||
path = os.path.join(config.get_path('pkgbuilds'), 'device', device_name, 'deviceinfo')
|
||||
|
||||
# Legacy errors
|
||||
if "flash_methods" in info:
|
||||
raise RuntimeError("deviceinfo_flash_methods has been renamed to"
|
||||
" deviceinfo_flash_method. Please adjust your"
|
||||
" deviceinfo file: " + path)
|
||||
if "external_disk" in info or "external_disk_install" in info:
|
||||
raise RuntimeError("Instead of deviceinfo_external_disk and"
|
||||
" deviceinfo_external_disk_install, please use the"
|
||||
" new variable deviceinfo_external_storage in your"
|
||||
" deviceinfo file: " + path)
|
||||
if "msm_refresher" in info:
|
||||
raise RuntimeError("It is enough to specify 'msm-fb-refresher' in the"
|
||||
" depends of your device's package now. Please"
|
||||
" delete the deviceinfo_msm_refresher line in: " + path)
|
||||
if "flash_fastboot_vendor_id" in info:
|
||||
raise RuntimeError("Fastboot doesn't allow specifying the vendor ID"
|
||||
" anymore (#1830). Try removing the"
|
||||
" 'deviceinfo_flash_fastboot_vendor_id' line in: " + path + " (if you are sure that "
|
||||
" you need this, then we can probably bring it back to fastboot, just"
|
||||
" let us know in the postmarketOS issues!)")
|
||||
if "nonfree" in info:
|
||||
raise RuntimeError("deviceinfo_nonfree is unused. "
|
||||
"Please delete it in: " + path)
|
||||
if "dev_keyboard" in info:
|
||||
raise RuntimeError("deviceinfo_dev_keyboard is unused. "
|
||||
"Please delete it in: " + path)
|
||||
if "date" in info:
|
||||
raise RuntimeError("deviceinfo_date was replaced by deviceinfo_year. "
|
||||
"Set it to the release year in: " + path)
|
||||
|
||||
# "codename" is required
|
||||
codename = os.path.basename(os.path.dirname(path))
|
||||
if codename.startswith("device-"):
|
||||
codename = codename[7:]
|
||||
# kupfer prepends the SoC
|
||||
codename_alternative = codename.split('-', maxsplit=1)[1] if codename.count('-') > 1 else codename
|
||||
_codename = info.get('codename', None)
|
||||
if not _codename or not (_codename in [codename, codename_alternative] or codename.startswith(_codename) or
|
||||
codename_alternative.startswith(_codename)):
|
||||
raise RuntimeError(f"Please add 'deviceinfo_codename=\"{codename}\"' "
|
||||
f"to: {path}")
|
||||
|
||||
# "chassis" is required
|
||||
chassis_types = deviceinfo_chassis_types
|
||||
if "chassis" not in info or not info["chassis"]:
|
||||
logging.info("NOTE: the most commonly used chassis types in"
|
||||
" postmarketOS are 'handset' (for phones) and 'tablet'.")
|
||||
raise RuntimeError(f"Please add 'deviceinfo_chassis' to: {path}")
|
||||
|
||||
# "arch" is required
|
||||
if "arch" not in info or not info["arch"]:
|
||||
raise RuntimeError(f"Please add 'deviceinfo_arch' to: {path}")
|
||||
|
||||
# "chassis" validation
|
||||
chassis_type = info["chassis"]
|
||||
if chassis_type not in chassis_types:
|
||||
raise RuntimeError(f"Unknown chassis type '{chassis_type}', should"
|
||||
f" be one of {', '.join(chassis_types)}. Fix this"
|
||||
f" and try again: {path}")
|
||||
|
||||
|
||||
def parse_kernel_suffix(deviceinfo: dict[str, Optional[str]], kernel: str = 'mainline') -> dict[str, Optional[str]]:
|
||||
"""
|
||||
Remove the kernel suffix (as selected in 'pmbootstrap init') from
|
||||
deviceinfo variables. Related:
|
||||
https://wiki.postmarketos.org/wiki/Device_specific_package#Multiple_kernels
|
||||
|
||||
:param deviceinfo: deviceinfo dict, e.g.:
|
||||
{"a": "first",
|
||||
"b_mainline": "second",
|
||||
"b_downstream": "third"}
|
||||
|
||||
:param kernel: which kernel suffix to remove (e.g. "mainline")
|
||||
:returns: info, but with the configured kernel suffix removed, e.g:
|
||||
{"a": "first",
|
||||
"b": "second",
|
||||
"b_downstream": "third"}
|
||||
"""
|
||||
# Do nothing if the configured kernel isn't available for the device (e.g.
|
||||
# after switching from device with multiple kernels to device with only one
|
||||
# kernel)
|
||||
# kernels = pmb.parse._apkbuild.kernels(args, device)
|
||||
if not kernel: # or kernel not in kernels:
|
||||
logging.debug(f"parse_kernel_suffix: {kernel} not set, skipping")
|
||||
return deviceinfo
|
||||
|
||||
ret = copy.copy(deviceinfo)
|
||||
|
||||
suffix_kernel = kernel.replace("-", "_")
|
||||
for key in deviceinfo_attributes:
|
||||
key_kernel = f"{key}_{suffix_kernel}"
|
||||
if key_kernel not in ret:
|
||||
continue
|
||||
|
||||
# Move ret[key_kernel] to ret[key]
|
||||
logging.debug(f"parse_kernel_suffix: {key_kernel} => {key}")
|
||||
ret[key] = ret[key_kernel]
|
||||
del (ret[key_kernel])
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def parse_deviceinfo(deviceinfo_lines: list[str], device_name: str, kernel='mainline') -> DeviceInfo:
|
||||
"""
|
||||
:param deviceinfo_lines: lines of the deviceinfo file to parse
:param device_name: device name, used in log and error messages
|
||||
:param kernel: kernel suffix to strip from deviceinfo variables, defaults to "mainline"
|
||||
"""
|
||||
info: dict[str, Optional[str]] = {}
|
||||
for line in deviceinfo_lines:
|
||||
line = line.strip()
|
||||
if line.startswith("#") or not line:
|
||||
continue
|
||||
if "=" not in line:
|
||||
raise SyntaxError(f"{device_name}: No '=' found:\n\t{line}")
|
||||
split = line.split("=", 1)
|
||||
if not split[0].startswith("deviceinfo_"):
|
||||
logging.warning(f"{device_name}: Unknown key {split[0]} in deviceinfo:\n{line}")
|
||||
continue
|
||||
key = split[0][len("deviceinfo_"):]
|
||||
value = split[1].replace("\"", "").replace("\n", "")
|
||||
info[key] = value
|
||||
|
||||
# Assign None as default for attributes missing from the file
|
||||
for key in deviceinfo_attributes:
|
||||
if key not in info:
|
||||
info[key] = None
|
||||
|
||||
info = parse_kernel_suffix(info, kernel)
|
||||
sanity_check(info, device_name)
|
||||
if 'arch' in info:
|
||||
arch = info['arch']
|
||||
info['arch'] = PMOS_ARCHES_OVERRIDES.get(arch, arch) # type: ignore[arg-type]
|
||||
dev = DeviceInfo.fromDict(info)
|
||||
return dev
|
||||
@@ -1,87 +0,0 @@
|
||||
from config.state import config
|
||||
|
||||
from .deviceinfo import DeviceInfo, parse_deviceinfo
|
||||
from .device import get_device
|
||||
|
||||
deviceinfo_text = """
|
||||
# Reference: <https://postmarketos.org/deviceinfo>
|
||||
# Please use double quotes only. You can source this file in shell scripts.
|
||||
|
||||
deviceinfo_format_version="0"
|
||||
deviceinfo_name="BQ Aquaris X5"
|
||||
deviceinfo_manufacturer="BQ"
|
||||
deviceinfo_codename="bq-paella"
|
||||
deviceinfo_year="2015"
|
||||
deviceinfo_dtb="qcom/msm8916-longcheer-l8910"
|
||||
deviceinfo_append_dtb="true"
|
||||
deviceinfo_modules_initfs="smb1360 panel-longcheer-yushun-nt35520 panel-longcheer-truly-otm1288a msm himax-hx852x"
|
||||
deviceinfo_arch="aarch64"
|
||||
|
||||
# Device related
|
||||
deviceinfo_gpu_accelerated="true"
|
||||
deviceinfo_chassis="handset"
|
||||
deviceinfo_keyboard="false"
|
||||
deviceinfo_external_storage="true"
|
||||
deviceinfo_screen_width="720"
|
||||
deviceinfo_screen_height="1280"
|
||||
deviceinfo_getty="ttyMSM0;115200"
|
||||
|
||||
# Bootloader related
|
||||
deviceinfo_flash_method="fastboot"
|
||||
deviceinfo_kernel_cmdline="earlycon console=ttyMSM0,115200 PMOS_NO_OUTPUT_REDIRECT"
|
||||
deviceinfo_generate_bootimg="true"
|
||||
deviceinfo_flash_offset_base="0x80000000"
|
||||
deviceinfo_flash_offset_kernel="0x00080000"
|
||||
deviceinfo_flash_offset_ramdisk="0x02000000"
|
||||
deviceinfo_flash_offset_second="0x00f00000"
|
||||
deviceinfo_flash_offset_tags="0x01e00000"
|
||||
deviceinfo_flash_pagesize="2048"
|
||||
deviceinfo_flash_sparse="true"
|
||||
"""
|
||||
|
||||
|
||||
def test_parse_deviceinfo():
|
||||
config.try_load_file()
|
||||
d = parse_deviceinfo(deviceinfo_text.split('\n'), 'device-bq-paella')
|
||||
assert isinstance(d, DeviceInfo)
|
||||
assert d
|
||||
assert d.arch
|
||||
assert d.chassis
|
||||
assert d.flash_method
|
||||
assert d.flash_pagesize
|
||||
# test that fields not listed in the class definition make it into the object
|
||||
assert d.dtb
|
||||
assert d.gpu_accelerated
|
||||
|
||||
|
||||
def test_parse_variant_deviceinfo():
|
||||
config.try_load_file()
|
||||
# {'variant1': 'AAAAA', 'variant2': 'BBBBB', 'variant3': 'CCCCC'}
|
||||
variants = {f"variant{i+1}": chr(ord('A') + i) * 5 for i in range(0, 3)}
|
||||
field = "dev_touchscreen_calibration"
|
||||
text = deviceinfo_text + '\n'.join([""] + [f"deviceinfo_{field}_{variant}={value}" for variant, value in variants.items()])
|
||||
for variant, result in variants.items():
|
||||
d = parse_deviceinfo(text.split('\n'), 'device-bq-paella', kernel=variant)
|
||||
# note: the python code from pmb only strips one variant, the shell code in packaging strips all variants
|
||||
assert f'{field}_{variant}' not in d
|
||||
assert field in d
|
||||
assert d[field] == result
|
||||
|
||||
|
||||
def test_get_deviceinfo_from_repo():
|
||||
config.try_load_file()
|
||||
dev = get_device('sdm845-oneplus-enchilada')
|
||||
assert dev
|
||||
info = dev.parse_deviceinfo()
|
||||
assert info
|
||||
|
||||
|
||||
def test_get_variant_deviceinfo_from_repo():
|
||||
config.try_load_file()
|
||||
dev = get_device('sdm845-xiaomi-beryllium-ebbg')
|
||||
assert dev
|
||||
info = dev.parse_deviceinfo()
|
||||
assert info
|
||||
assert 'dtb' in info # variant-specific variable, check it has been stripped down from 'dtb_ebbg' to 'dtb'
|
||||
assert 'dtb_tianma' not in info
|
||||
assert info.dtb
|
||||
299
dictscheme.py
@@ -1,299 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import toml
|
||||
|
||||
from munch import Munch
|
||||
from toml.encoder import TomlEncoder, TomlPreserveInlineDictEncoder
|
||||
from typing import ClassVar, Generator, Optional, Union, Mapping, Any, get_type_hints, get_origin, get_args, Iterable
|
||||
|
||||
from typehelpers import UnionType, NoneType
|
||||
|
||||
|
||||
def resolve_type_hint(hint: type, ignore_origins: list[type] = []) -> Iterable[type]:
|
||||
origin = get_origin(hint)
|
||||
args: Iterable[type] = get_args(hint)
|
||||
if origin in ignore_origins:
|
||||
return [hint]
|
||||
if origin is Optional:
|
||||
args = set(list(args) + [NoneType])
|
||||
if origin in [Union, UnionType, Optional]:
|
||||
results: list[type] = []
|
||||
for arg in args:
|
||||
results += resolve_type_hint(arg, ignore_origins=ignore_origins)
|
||||
return results
|
||||
return [origin or hint]
|
||||
|
||||
|
||||
def flatten_hints(hints: Any) -> Generator[Any, None, None]:
|
||||
if not isinstance(hints, (list, tuple)):
|
||||
yield hints
|
||||
return
|
||||
for i in hints:
|
||||
yield from flatten_hints(i)
|
||||
|
||||
|
||||
def resolve_dict_hints(hints: Any) -> Generator[tuple[Any, ...], None, None]:
|
||||
for hint in flatten_hints(hints):
|
||||
t_origin = get_origin(hint)
|
||||
t_args = get_args(hint)
|
||||
if t_origin == dict:
|
||||
yield t_args
|
||||
continue
|
||||
if t_origin in [NoneType, Optional, Union, UnionType] and t_args:
|
||||
yield from resolve_dict_hints(t_args)
|
||||
continue
|
||||
|
||||
|
||||
class DictScheme(Munch):
|
||||
|
||||
_type_hints: ClassVar[dict[str, Any]]
|
||||
_strip_hidden: ClassVar[bool] = False
|
||||
_sparse: ClassVar[bool] = False
|
||||
|
||||
def __init__(self, d: Mapping = {}, validate: bool = True, **kwargs):
|
||||
self.update(dict(d) | kwargs, validate=validate)
|
||||
|
||||
@classmethod
|
||||
def transform(
|
||||
cls,
|
||||
values: Mapping[str, Any],
|
||||
*,
|
||||
validate: bool = True,
|
||||
allow_extra: bool = False,
|
||||
type_hints: Optional[dict[str, Any]] = None,
|
||||
) -> Any:
|
||||
results: dict[str, Any] = {}
|
||||
values = dict(values)
|
||||
for key in list(values.keys()):
|
||||
value = values.pop(key)
|
||||
type_hints = cls._type_hints if type_hints is None else type_hints
|
||||
if key in type_hints:
|
||||
_classes = tuple[type](resolve_type_hint(type_hints[key]))
|
||||
optional = bool(set([NoneType, None]).intersection(_classes))
|
||||
if optional and value is None:
|
||||
results[key] = None
|
||||
continue
|
||||
if issubclass(_classes[0], dict):
|
||||
assert isinstance(value, dict) or (optional and value is None), f'{key=} is not dict: {value!r}, {_classes=}'
|
||||
target_class = _classes[0]
|
||||
if target_class in [None, NoneType, Optional]:
|
||||
for target in _classes[1:]:
|
||||
if target not in [None, NoneType, Optional]:
|
||||
target_class = target
|
||||
break
|
||||
if target_class is dict:
|
||||
dict_hints = list(resolve_dict_hints(type_hints[key]))
|
||||
if len(dict_hints) != 1:
|
||||
msg = f"transform(): Received wrong amount of type hints for key {key}: {len(dict_hints)}"
|
||||
if validate:
|
||||
raise Exception(msg)
|
||||
logging.warning(msg)
|
||||
if len(dict_hints) == 1 and value is not None:
|
||||
if len(dict_hints[0]) != 2 or not all(dict_hints[0]):
|
||||
logging.debug(f"Weird dict hints received: {dict_hints}")
|
||||
continue
|
||||
key_type, value_type = dict_hints[0]
|
||||
if not isinstance(value, Mapping):
|
||||
msg = f"Got non-mapping {value!r} for expected dict type: {key_type} => {value_type}. Allowed classes: {_classes}"
|
||||
if validate:
|
||||
raise Exception(msg)
|
||||
logging.warning(msg)
|
||||
results[key] = value
|
||||
continue
|
||||
if isinstance(key_type, type):
|
||||
if issubclass(key_type, str):
|
||||
target_class = Munch
|
||||
else:
|
||||
msg = f"{key=} subdict got wrong key type hint (expected str): {key_type}"
|
||||
if validate:
|
||||
raise Exception(msg)
|
||||
logging.warning(msg)
|
||||
if validate:
|
||||
for k in value:
|
||||
if not isinstance(k, tuple(flatten_hints(key_type))):
|
||||
raise Exception(f'Subdict "{key}": wrong type for subkey "{k}": got: {type(k)}, expected: {key_type}')
|
||||
dict_content_hints = {k: value_type for k in value}
|
||||
value = cls.transform(value, validate=validate, allow_extra=allow_extra, type_hints=dict_content_hints)
|
||||
if not isinstance(value, target_class):
|
||||
if not (optional and value is None):
|
||||
assert issubclass(target_class, Munch)
|
||||
# despite the above assert, mypy doesn't seem to understand target_class is a Munch here
|
||||
kwargs = {'validate': validate} if issubclass(target_class, DictScheme) else {}
|
||||
value = target_class(value, **kwargs) # type:ignore[attr-defined]
|
||||
else:
|
||||
# print(f"nothing to do: '{key}' was already {target_class})
|
||||
pass
|
||||
# handle numerics
|
||||
elif set(_classes).intersection([int, float]) and isinstance(value, str) and str not in _classes:
|
||||
parsed_number = None
|
||||
parsers: list[tuple[type, list]] = [(int, [10]), (int, [0]), (float, [])]
|
||||
for _cls, args in parsers:
|
||||
if _cls not in _classes:
|
||||
continue
|
||||
try:
|
||||
parsed_number = _cls(value, *args)
|
||||
break
|
||||
except ValueError:
|
||||
continue
|
||||
if parsed_number is None:
|
||||
if validate:
|
||||
raise Exception(f"Couldn't parse string value {repr(value)} for key '{key}' into number formats: " +
|
||||
(', '.join(list(c.__name__ for c in _classes))))
|
||||
else:
|
||||
value = parsed_number
|
||||
if validate:
|
||||
if not isinstance(value, _classes):
|
||||
raise Exception(f'key "{key}" has value of wrong type! expected: '
|
||||
f'{" ,".join([ c.__name__ for c in _classes])}; '
|
||||
f'got: {type(value).__name__}; value: {value}')
|
||||
elif validate and not allow_extra:
|
||||
logging.debug(f"{cls}: unknown key '{key}': {value}")
|
||||
raise Exception(f'{cls}: Unknown key "{key}"')
|
||||
else:
|
||||
if isinstance(value, dict) and not isinstance(value, Munch):
|
||||
value = Munch.fromDict(value)
|
||||
results[key] = value
|
||||
if values:
|
||||
if validate:
|
||||
raise Exception(f'values contained unknown keys: {list(values.keys())}')
|
||||
results |= values
|
||||
|
||||
return results
|
||||
|
||||
@classmethod
|
||||
def fromDict(cls, values: Mapping[str, Any], validate: bool = True):
|
||||
return cls(d=values, validate=validate)
|
||||
|
||||
def toDict(
|
||||
self,
|
||||
strip_hidden: Optional[bool] = None,
|
||||
sparse: Optional[bool] = None,
|
||||
):
|
||||
return self.strip_dict(
|
||||
self,
|
||||
strip_hidden=strip_hidden,
|
||||
sparse=sparse,
|
||||
recursive=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def strip_dict(
|
||||
cls,
|
||||
d: dict[Any, Any],
|
||||
strip_hidden: Optional[bool] = None,
|
||||
sparse: Optional[bool] = None,
|
||||
recursive: bool = True,
|
||||
hints: Optional[dict[str, Any]] = None,
|
||||
validate: bool = True,
|
||||
) -> dict[Any, Any]:
|
||||
# preserve original None-type args
|
||||
_sparse = cls._sparse if sparse is None else sparse
|
||||
_strip_hidden = cls._strip_hidden if strip_hidden is None else strip_hidden
|
||||
hints = cls._type_hints if hints is None else hints
|
||||
result = dict(d)
|
||||
if not (_strip_hidden or _sparse or result):
|
||||
return result
|
||||
for k, v in d.items():
|
||||
type_hint = resolve_type_hint(hints.get(k, "abc"))
|
||||
if not isinstance(k, str):
|
||||
msg = f"strip_dict(): unknown key type {k=}: {type(k)=}"
|
||||
if validate:
|
||||
raise Exception(msg)
|
||||
logging.warning(f"{msg} (skipping)")
|
||||
continue
|
||||
if _strip_hidden and k.startswith('_'):
|
||||
result.pop(k)
|
||||
continue
|
||||
if v is None:
|
||||
if NoneType not in type_hint:
|
||||
msg = f'encountered illegal null value at key "{k}" for typehint {type_hint}'
|
||||
if validate:
|
||||
raise Exception(msg)
|
||||
logging.warning(msg)
|
||||
if _sparse:
|
||||
result.pop(k)
|
||||
continue
|
||||
if recursive and isinstance(v, dict):
|
||||
if not v:
|
||||
result[k] = {}
|
||||
continue
|
||||
if isinstance(v, DictScheme):
|
||||
# pass the original (possibly None) sparse and strip_hidden so the sub-scheme applies its own defaults
|
||||
result[k] = v.toDict(strip_hidden=strip_hidden, sparse=sparse)
|
||||
continue
|
||||
if isinstance(v, Munch):
|
||||
result[k] = v.toDict()
|
||||
if k not in hints:
|
||||
continue
|
||||
_subhints = {}
|
||||
_hints = resolve_type_hint(hints[k], [dict])
|
||||
hints_flat = list(flatten_hints(_hints))
|
||||
subclass = DictScheme
|
||||
for hint in hints_flat:
|
||||
if get_origin(hint) == dict:
|
||||
_valtype = get_args(hint)[1]
|
||||
_subhints = {n: _valtype for n in v.keys()}
|
||||
break
|
||||
if isinstance(hint, type) and issubclass(hint, DictScheme):
|
||||
subclass = hint
|
||||
_subhints = hint._type_hints
|
||||
break
|
||||
else:
|
||||
# print(f"ignoring {hint=}")
|
||||
continue
|
||||
result[k] = subclass.strip_dict(
|
||||
v,
|
||||
hints=_subhints,
|
||||
sparse=_sparse,
|
||||
strip_hidden=_strip_hidden,
|
||||
recursive=recursive,
|
||||
)
|
||||
return result
|
||||
|
||||
def update(self, d: Mapping[str, Any], validate: bool = True):
|
||||
Munch.update(self, type(self).transform(d, validate=validate))
|
||||
|
||||
def __init_subclass__(cls):
|
||||
super().__init_subclass__()
|
||||
cls._type_hints = {name: hint for name, hint in get_type_hints(cls).items() if get_origin(hint) is not ClassVar}
|
||||
|
||||
def __repr__(self):
|
||||
return f'{type(self)}{dict.__repr__(dict(self))}'
|
||||
|
||||
def toYAML(
|
||||
self,
|
||||
strip_hidden: Optional[bool] = None,
|
||||
sparse: Optional[bool] = None,
|
||||
**yaml_args,
|
||||
) -> str:
|
||||
import yaml
|
||||
yaml_args = {'sort_keys': False} | yaml_args
|
||||
dumped = yaml.dump(
|
||||
self.toDict(strip_hidden=strip_hidden, sparse=sparse),
|
||||
**yaml_args,
|
||||
)
|
||||
if dumped is None:
|
||||
raise Exception(f"Failed to yaml-serialse {self}")
|
||||
return dumped
|
||||
|
||||
def toToml(
|
||||
self,
|
||||
strip_hidden: Optional[bool] = None,
|
||||
sparse: Optional[bool] = None,
|
||||
encoder: Optional[TomlEncoder] = TomlPreserveInlineDictEncoder(),
|
||||
) -> str:
|
||||
return toml.dumps(
|
||||
self.toDict(strip_hidden=strip_hidden, sparse=sparse),
|
||||
encoder=encoder,
|
||||
)
|
||||
|
||||
|
||||
class TomlInlineDict(dict, toml.decoder.InlineTableDict):
|
||||
pass
|
||||
|
||||
|
||||
def toml_inline_dicts(value: Any) -> Any:
|
||||
if not isinstance(value, Mapping):
|
||||
return value
|
||||
return TomlInlineDict({k: toml_inline_dicts(v) for k, v in value.items()})
|
||||
222
distro/distro.py
@@ -1,35 +1,23 @@
|
||||
import logging
|
||||
from typing import Optional, Mapping
|
||||
|
||||
from enum import IntFlag
|
||||
from typing import Generic, Mapping, Optional, TypeVar
|
||||
|
||||
from constants import Arch, ARCHES, REPOSITORIES, KUPFER_BRANCH_MARKER, KUPFER_HTTPS, CHROOT_PATHS
|
||||
from constants import Arch, ARCHES, BASE_DISTROS, REPOSITORIES, KUPFER_HTTPS, CHROOT_PATHS
|
||||
from generator import generate_pacman_conf_body
|
||||
from config.state import config
|
||||
from config import config
|
||||
|
||||
from .repo import BinaryPackageType, RepoInfo, Repo, LocalRepo, RemoteRepo
|
||||
from .repo_config import AbstrRepoConfig, BaseDistro, ReposConfigFile, REPOS_CONFIG_DEFAULT, get_repo_config as _get_repo_config
|
||||
from .package import PackageInfo
|
||||
from .repo import RepoInfo, Repo
|
||||
|
||||
|
||||
class DistroLocation(IntFlag):
|
||||
REMOTE = 0
|
||||
LOCAL = 1
|
||||
CHROOT = 3
|
||||
|
||||
|
||||
RepoType = TypeVar('RepoType', bound=Repo)
|
||||
|
||||
|
||||
class Distro(Generic[RepoType]):
|
||||
repos: Mapping[str, RepoType]
|
||||
class Distro:
|
||||
repos: Mapping[str, Repo]
|
||||
arch: str
|
||||
|
||||
def __init__(self, arch: Arch, repo_infos: dict[str, RepoInfo], scan=False):
|
||||
assert (arch in ARCHES)
|
||||
self.arch = arch
|
||||
self.repos = dict[str, RepoType]()
|
||||
self.repos = dict[str, Repo]()
|
||||
for repo_name, repo_info in repo_infos.items():
|
||||
self.repos[repo_name] = self._create_repo(
|
||||
self.repos[repo_name] = Repo(
|
||||
name=repo_name,
|
||||
arch=arch,
|
||||
url_template=repo_info.url_template,
|
||||
@@ -37,22 +25,16 @@ class Distro(Generic[RepoType]):
|
||||
scan=scan,
|
||||
)
|
||||
|
||||
def _create_repo(self, **kwargs) -> RepoType:
|
||||
raise NotImplementedError()
|
||||
Repo(**kwargs)
|
||||
|
||||
def get_packages(self) -> dict[str, BinaryPackageType]:
|
||||
def get_packages(self) -> dict[str, PackageInfo]:
|
||||
""" get packages from all repos, semantically overlaying them"""
|
||||
results = dict[str, BinaryPackageType]()
|
||||
results = dict[str, PackageInfo]()
|
||||
for repo in list(self.repos.values())[::-1]:
|
||||
assert repo.packages is not None
|
||||
results.update(repo.packages)
|
||||
return results
|
||||
|
||||
def repos_config_snippet(self, extra_repos: Mapping[str, RepoInfo] = {}) -> str:
|
||||
extras: list[Repo] = [
|
||||
Repo(name, url_template=info.url_template, arch=self.arch, options=info.options, scan=False) for name, info in extra_repos.items()
|
||||
]
|
||||
extras = [Repo(name, url_template=info.url_template, arch=self.arch, options=info.options, scan=False) for name, info in extra_repos.items()]
|
||||
return '\n\n'.join(repo.config_snippet() for repo in (extras + list(self.repos.values())))
|
||||
|
||||
def get_pacman_conf(self, extra_repos: Mapping[str, RepoInfo] = {}, check_space: bool = True, in_chroot: bool = True):
|
||||
@@ -71,177 +53,43 @@ class Distro(Generic[RepoType]):
|
||||
return True
|
||||
|
||||
|
||||
class LocalDistro(Distro[LocalRepo]):
|
||||
|
||||
def _create_repo(self, **kwargs) -> LocalRepo:
|
||||
return LocalRepo(**kwargs)
|
||||
|
||||
|
||||
class RemoteDistro(Distro[RemoteRepo]):
|
||||
|
||||
def _create_repo(self, **kwargs) -> RemoteRepo:
|
||||
return RemoteRepo(**kwargs)
|
||||
def get_base_distro(arch: str) -> Distro:
|
||||
repos = {name: RepoInfo(url_template=url) for name, url in BASE_DISTROS[arch]['repos'].items()}
|
||||
return Distro(arch=arch, repo_infos=repos, scan=False)
|
||||
|
||||
|
||||
def get_kupfer(arch: str, url_template: str, scan: bool = False) -> Distro:
|
||||
repos = {name: RepoInfo(url_template=url_template, options={'SigLevel': 'Never'}) for name in REPOSITORIES}
|
||||
remote = not url_template.startswith('file://')
|
||||
clss = RemoteDistro if remote else LocalDistro
|
||||
distro = clss(
|
||||
return Distro(
|
||||
arch=arch,
|
||||
repo_infos=repos,
|
||||
scan=scan,
|
||||
)
|
||||
assert isinstance(distro, (LocalDistro, RemoteDistro))
|
||||
if remote:
|
||||
assert isinstance(distro, RemoteDistro)
|
||||
for repo in distro.repos.values():
|
||||
repo.cache_repo_db = True
|
||||
|
||||
return distro
|
||||
|
||||
|
||||
_kupfer_https: dict[Arch, RemoteDistro] = {}
|
||||
_kupfer_local: dict[Arch, LocalDistro] = {}
|
||||
_kupfer_local_chroots: dict[Arch, LocalDistro] = {}
|
||||
_kupfer_https = dict[Arch, Distro]()
|
||||
_kupfer_local = dict[Arch, Distro]()
|
||||
_kupfer_local_chroots = dict[Arch, Distro]()
|
||||
|
||||
|
||||
def reset_distro_caches():
|
||||
global _kupfer_https, _kupfer_local, _kupfer_local_chroots
|
||||
for cache in _kupfer_https, _kupfer_local, _kupfer_local_chroots:
|
||||
assert isinstance(cache, dict)
|
||||
cache.clear()
|
||||
|
||||
|
||||
def get_kupfer_url(url: str = KUPFER_HTTPS, branch: Optional[str] = None) -> str:
|
||||
"""gets the repo URL for `branch`, getting branch from config if `None` is passed."""
|
||||
branch = config.file.pacman.repo_branch if branch is None else branch
|
||||
return url.replace(KUPFER_BRANCH_MARKER, branch)
|
||||
|
||||
|
||||
def get_repo_config(*args, **kwargs) -> ReposConfigFile:
|
||||
repo_config, changed = _get_repo_config(*args, **kwargs)
|
||||
if changed:
|
||||
logging.debug("Repo configs changed, resetting caches")
|
||||
reset_distro_caches()
|
||||
return repo_config
|
||||
|
||||
|
||||
def get_kupfer_repo_names(local) -> list[str]:
|
||||
configs = get_repo_config()
|
||||
results = []
|
||||
for repo, repo_config in configs.repos.items():
|
||||
if not local and repo_config.local_only:
|
||||
continue
|
||||
results.append(repo)
|
||||
return results
|
||||
|
||||
|
||||
def get_RepoInfo(arch: Arch, repo_config: AbstrRepoConfig, default_url: Optional[str]) -> RepoInfo:
|
||||
url = repo_config.remote_url or default_url
|
||||
if isinstance(url, dict):
|
||||
if arch not in url and not default_url:
|
||||
raise Exception(f"Invalid repo config: Architecture {arch} not in remote_url mapping: {url}")
|
||||
url = url.get(arch, default_url)
|
||||
assert url
|
||||
return RepoInfo(
|
||||
url_template=get_kupfer_url(url),
|
||||
options=repo_config.get('options', None) or {},
|
||||
)
|
||||
|
||||
|
||||
def get_base_distro(arch: Arch, scan: bool = False, unsigned: bool = True, cache_db: bool = True) -> RemoteDistro:
|
||||
base_distros = get_repo_config().base_distros
|
||||
if base_distros is None or arch not in base_distros:
|
||||
base_distros = REPOS_CONFIG_DEFAULT.base_distros
|
||||
assert base_distros
|
||||
distro_config: BaseDistro
|
||||
distro_config = base_distros.get(arch) # type: ignore[assignment]
|
||||
repos = {}
|
||||
for repo, repo_config in distro_config.repos.items():
|
||||
if unsigned:
|
||||
repo_config['options'] = (repo_config.get('options', None) or {}) | {'SigLevel': 'Never'}
|
||||
repos[repo] = get_RepoInfo(arch, repo_config, default_url=distro_config.remote_url)
|
||||
|
||||
distro = RemoteDistro(arch=arch, repo_infos=repos, scan=False)
|
||||
if cache_db:
|
||||
for r in distro.repos.values():
|
||||
assert isinstance(r, RemoteRepo)
|
||||
r.cache_repo_db = True
|
||||
if scan:
|
||||
distro.scan()
|
||||
return distro
|
||||
|
||||
|
||||
def get_kupfer_distro(
|
||||
arch: Arch,
|
||||
location: DistroLocation,
|
||||
scan: bool = False,
|
||||
cache_db: bool = True,
|
||||
) -> Distro:
|
||||
global _kupfer_https, _kupfer_local, _kupfer_local_chroots
|
||||
cls: type[Distro]
|
||||
cache: Mapping[str, Distro]
|
||||
repo_config = get_repo_config()
|
||||
remote = False
|
||||
if location == DistroLocation.REMOTE:
|
||||
remote = True
|
||||
cache = _kupfer_https
|
||||
default_url = repo_config.remote_url or KUPFER_HTTPS
|
||||
repos = {repo: get_RepoInfo(arch, conf, default_url) for repo, conf in repo_config.repos.items() if not conf.local_only}
|
||||
cls = RemoteDistro
|
||||
elif location in [DistroLocation.CHROOT, DistroLocation.LOCAL]:
|
||||
if location == DistroLocation.CHROOT:
|
||||
cache = _kupfer_local_chroots
|
||||
pkgdir = CHROOT_PATHS['packages']
|
||||
else:
|
||||
assert location == DistroLocation.LOCAL
|
||||
cache = _kupfer_local
|
||||
pkgdir = config.get_path('packages')
|
||||
default_url = f"file://{pkgdir}/$arch/$repo"
|
||||
cls = LocalDistro
|
||||
repos = {}
|
||||
for name, repo in repo_config.repos.items():
|
||||
repo = repo.copy()
|
||||
repo.remote_url = default_url
|
||||
repos[name] = get_RepoInfo(arch, repo, default_url)
|
||||
else:
|
||||
raise Exception(f"Unknown distro location {location}")
|
||||
if cache is None:
|
||||
cache = {}
|
||||
assert arch
|
||||
assert isinstance(cache, dict)
|
||||
if arch not in cache or not cache[arch]:
|
||||
distro = cls(
|
||||
arch=arch,
|
||||
repo_infos=repos,
|
||||
scan=False,
|
||||
)
|
||||
assert isinstance(distro, (LocalDistro, RemoteDistro))
|
||||
cache[arch] = distro
|
||||
if remote and cache_db:
|
||||
assert isinstance(distro, RemoteDistro)
|
||||
for r in distro.repos.values():
|
||||
r.cache_repo_db = True
|
||||
if scan:
|
||||
distro.scan()
|
||||
return distro
|
||||
item: Distro = cache[arch]
|
||||
def get_kupfer_https(arch: Arch, scan: bool = False) -> Distro:
|
||||
global _kupfer_https
|
||||
if arch not in _kupfer_https or not _kupfer_https[arch]:
|
||||
_kupfer_https[arch] = get_kupfer(arch, KUPFER_HTTPS.replace('%branch%', config.file['pacman']['repo_branch']), scan)
|
||||
item = _kupfer_https[arch]
|
||||
if scan and not item.is_scanned():
|
||||
item.scan()
|
||||
return item
|
||||
|
||||
|
||||
def get_kupfer_https(arch: Arch, scan: bool = False, cache_db: bool = True) -> RemoteDistro:
|
||||
d = get_kupfer_distro(arch, location=DistroLocation.REMOTE, scan=scan, cache_db=cache_db)
|
||||
assert isinstance(d, RemoteDistro)
|
||||
return d
|
||||
|
||||
|
||||
def get_kupfer_local(arch: Optional[Arch] = None, scan: bool = False, in_chroot: bool = True) -> LocalDistro:
|
||||
arch = arch or config.runtime.arch
|
||||
assert arch
|
||||
location = DistroLocation.CHROOT if in_chroot else DistroLocation.LOCAL
|
||||
d = get_kupfer_distro(arch, location=location, scan=scan)
|
||||
assert isinstance(d, LocalDistro)
|
||||
return d
|
||||
def get_kupfer_local(arch: Optional[Arch] = None, in_chroot: bool = True, scan: bool = False) -> Distro:
|
||||
global _kupfer_local, _kupfer_local_chroots
|
||||
cache = _kupfer_local_chroots if in_chroot else _kupfer_local
|
||||
arch = arch or config.runtime['arch']
|
||||
if arch not in cache or not cache[arch]:
|
||||
dir = CHROOT_PATHS['packages'] if in_chroot else config.get_path('packages')
|
||||
cache[arch] = get_kupfer(arch, f"file://{dir}/$arch/$repo")
|
||||
item = cache[arch]
|
||||
if scan and not item.is_scanned():
|
||||
item.scan()
|
||||
return item
|
||||
|
||||
@@ -1,93 +1,33 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from shutil import copyfileobj
|
||||
from typing import Optional, Union
|
||||
from urllib.request import urlopen
|
||||
|
||||
from exec.file import get_temp_dir, makedir
|
||||
from typing import Optional
|
||||
|
||||
|
||||
class PackageInfo:
|
||||
name: str
|
||||
version: str
|
||||
|
||||
|
||||
class BinaryPackage(PackageInfo):
|
||||
arch: str
|
||||
filename: str
|
||||
resolved_url: Optional[str]
|
||||
_desc: Optional[dict[str, Union[str, list[str]]]]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
version: str,
|
||||
arch: str,
|
||||
filename: str,
|
||||
resolved_url: Optional[str] = None,
|
||||
resolved_url: str = None,
|
||||
):
|
||||
self.name = name
|
||||
self.version = version
|
||||
self.arch = arch
|
||||
self.filename = filename
|
||||
self.resolved_url = resolved_url
|
||||
|
||||
def __repr__(self):
|
||||
return f'{self.name}@{self.version}'
|
||||
|
||||
@classmethod
|
||||
def parse_desc(clss, desc_str: str, resolved_repo_url=None):
|
||||
@staticmethod
|
||||
def parse_desc(desc_str: str, resolved_url=None):
|
||||
"""Parses a desc file, returning a PackageInfo"""
|
||||
desc: dict[str, Union[str, list[str]]] = {}
|
||||
for segment in f'\n{desc_str}'.split('\n%'):
|
||||
if not segment.strip():
|
||||
continue
|
||||
key, elements = (e.strip() for e in segment.strip().split('%\n', 1))
|
||||
elements_split = elements.split('\n')
|
||||
desc[key] = elements if len(elements_split) == 1 else elements_split
|
||||
validated: dict[str, str] = {}
|
||||
for key in ['NAME', 'VERSION', 'ARCH', 'FILENAME']:
|
||||
assert key in desc
|
||||
value = desc[key]
|
||||
assert isinstance(value, str)
|
||||
validated[key] = value
|
||||
p = clss(
|
||||
name=validated['NAME'],
|
||||
version=validated['VERSION'],
|
||||
arch=validated['ARCH'],
|
||||
filename=validated['FILENAME'],
|
||||
resolved_url='/'.join([resolved_repo_url, validated['FILENAME']]),
|
||||
)
|
||||
p._desc = desc
|
||||
return p
|
||||
|
||||
def acquire(self) -> str:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class LocalPackage(BinaryPackage):
|
||||
|
||||
def acquire(self) -> str:
|
||||
assert self.resolved_url and self.filename and self.filename in self.resolved_url
|
||||
path = f'{self.resolved_url.split("file://")[1]}'
|
||||
assert os.path.exists(path) or print(path)
|
||||
return path
|
||||
|
||||
|
||||
class RemotePackage(BinaryPackage):
|
||||
|
||||
def acquire(self, dest_dir: Optional[str] = None) -> str:
|
||||
assert self.resolved_url and '.pkg.tar.' in self.resolved_url
|
||||
url = f"{self.resolved_url}"
|
||||
assert url
|
||||
|
||||
dest_dir = dest_dir or get_temp_dir()
|
||||
makedir(dest_dir)
|
||||
dest_file_path = os.path.join(dest_dir, self.filename)
|
||||
|
||||
logging.info(f"Trying to download package {url}")
|
||||
with urlopen(url) as fsrc, open(dest_file_path, 'wb') as fdst:
|
||||
copyfileobj(fsrc, fdst)
|
||||
logging.info(f"{self.filename} downloaded from repos")
|
||||
return dest_file_path
|
||||
pruned_lines = ([line.strip() for line in desc_str.split('%') if line.strip()])
|
||||
desc = {}
|
||||
for key, value in zip(pruned_lines[0::2], pruned_lines[1::2]):
|
||||
desc[key.strip()] = value.strip()
|
||||
return PackageInfo(desc['NAME'], desc['VERSION'], desc['FILENAME'], resolved_url='/'.join([resolved_url, desc['FILENAME']]))
|
||||
|
||||
106
distro/repo.py
@@ -2,16 +2,10 @@ from copy import deepcopy
|
||||
import logging
|
||||
import os
|
||||
import tarfile
|
||||
import tempfile
|
||||
import urllib.request
|
||||
|
||||
from typing import Generic, TypeVar
|
||||
|
||||
from config.state import config
|
||||
from exec.file import get_temp_dir
|
||||
from utils import download_file
|
||||
|
||||
from .package import BinaryPackage, LocalPackage, RemotePackage
|
||||
|
||||
BinaryPackageType = TypeVar('BinaryPackageType', bound=BinaryPackage)
|
||||
from .package import PackageInfo
|
||||
|
||||
|
||||
def resolve_url(url_template, repo_name: str, arch: str):
|
||||
@@ -22,67 +16,47 @@ def resolve_url(url_template, repo_name: str, arch: str):
|
||||
|
||||
|
||||
class RepoInfo:
|
||||
options: dict[str, str]
|
||||
options: dict[str, str] = {}
|
||||
url_template: str
|
||||
|
||||
def __init__(self, url_template: str, options: dict[str, str] = {}):
|
||||
self.url_template = url_template
|
||||
self.options = {} | options
|
||||
self.options.update(options)
|
||||
|
||||
|
||||
class Repo(RepoInfo, Generic[BinaryPackageType]):
|
||||
class Repo(RepoInfo):
|
||||
name: str
|
||||
resolved_url: str
|
||||
arch: str
|
||||
packages: dict[str, BinaryPackageType]
|
||||
packages: dict[str, PackageInfo]
|
||||
remote: bool
|
||||
scanned: bool = False
|
||||
|
||||
def resolve_url(self) -> str:
|
||||
return resolve_url(self.url_template, repo_name=self.name, arch=self.arch)
|
||||
|
||||
def scan(self, allow_failure: bool = False) -> bool:
|
||||
failed = False
|
||||
def scan(self):
|
||||
self.resolved_url = self.resolve_url()
|
||||
self.remote = not self.resolved_url.startswith('file://')
|
||||
try:
|
||||
path = self.acquire_db_file()
|
||||
index = tarfile.open(path)
|
||||
except Exception as ex:
|
||||
if not allow_failure:
|
||||
raise ex
|
||||
logging.error(f"Repo {self.name}, {self.arch}: Error acquiring repo DB: {ex!r}")
|
||||
return False
|
||||
uri = f'{self.resolved_url}/{self.name}.db'
|
||||
path = ''
|
||||
if self.remote:
|
||||
logging.info(f'Downloading repo file from {uri}')
|
||||
with urllib.request.urlopen(uri) as request:
|
||||
fd, path = tempfile.mkstemp()
|
||||
with open(fd, 'wb') as writable:
|
||||
writable.write(request.read())
|
||||
else:
|
||||
path = uri.split('file://')[1]
|
||||
logging.debug(f'Parsing repo file at {path}')
|
||||
for node in index.getmembers():
|
||||
if os.path.basename(node.name) == 'desc':
|
||||
pkgname = os.path.dirname(node.name)
|
||||
logging.debug(f'Parsing desc file for {pkgname}')
|
||||
fd = index.extractfile(node)
|
||||
assert fd
|
||||
contents = fd.read().decode()
|
||||
try:
|
||||
pkg = self._parse_desc(contents)
|
||||
except Exception as ex:
|
||||
if not allow_failure:
|
||||
raise ex
|
||||
logging.error(f'Repo {self.name}, {self.arch}: Error parsing desc for "{pkgname}": {ex!r}')
|
||||
failed = True
|
||||
continue
|
||||
self.packages[pkg.name] = pkg
|
||||
if failed:
|
||||
return False
|
||||
with tarfile.open(path) as index:
|
||||
for node in index.getmembers():
|
||||
if os.path.basename(node.name) == 'desc':
|
||||
logging.debug(f'Parsing desc file for {os.path.dirname(node.name)}')
|
||||
pkg = PackageInfo.parse_desc(index.extractfile(node).read().decode(), self.resolved_url)
|
||||
self.packages[pkg.name] = pkg
|
||||
|
||||
self.scanned = True
|
||||
return True
|
||||
|
||||
def _parse_desc(self, desc_text: str): # can't annotate the type properly :(
|
||||
raise NotImplementedError()
|
||||
|
||||
def parse_desc(self, desc_text: str) -> BinaryPackageType:
|
||||
return self._parse_desc(desc_text)
|
||||
|
||||
def acquire_db_file(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
def __init__(self, name: str, url_template: str, arch: str, options={}, scan=False):
|
||||
self.packages = {}
|
||||
@@ -102,33 +76,3 @@ class Repo(RepoInfo, Generic[BinaryPackageType]):
|
||||
|
||||
def get_RepoInfo(self):
|
||||
return RepoInfo(url_template=self.url_template, options=self.options)
|
||||
|
||||
|
||||
class LocalRepo(Repo[LocalPackage]):
|
||||
|
||||
def _parse_desc(self, desc_text: str) -> LocalPackage:
|
||||
return LocalPackage.parse_desc(desc_text, resolved_repo_url=self.resolved_url)
|
||||
|
||||
def acquire_db_file(self) -> str:
|
||||
return f'{self.resolved_url}/{self.name}.db'.split('file://')[1]
|
||||
|
||||
|
||||
class RemoteRepo(Repo[RemotePackage]):
|
||||
cache_repo_db: bool
|
||||
|
||||
def __init__(self, *kargs, cache_repo_db: bool = False, **kwargs):
|
||||
self.cache_repo_db = cache_repo_db
|
||||
super().__init__(*kargs, **kwargs)
|
||||
|
||||
def _parse_desc(self, desc_text: str) -> RemotePackage:
|
||||
return RemotePackage.parse_desc(desc_text, resolved_repo_url=self.resolved_url)
|
||||
|
||||
def acquire_db_file(self) -> str:
|
||||
uri = f'{self.resolved_url}/{self.name}.db'
|
||||
logging.info(f'Downloading repo file from {uri}')
|
||||
assert self.arch and self.name, f"repo has incomplete information: {self.name=}, {self.arch=}"
|
||||
path = get_temp_dir() if not self.cache_repo_db else os.path.join(config.get_path('pacman'), 'repo_dbs', self.arch)
|
||||
os.makedirs(path, exist_ok=True)
|
||||
repo_file = f'{path}/{self.name}.tar.gz'
|
||||
download_file(repo_file, uri, update=True)
|
||||
return repo_file
|
||||
|
||||
@@ -1,170 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
import toml
|
||||
import yaml
|
||||
|
||||
from copy import deepcopy
|
||||
from typing import ClassVar, Optional, Mapping, Union
|
||||
|
||||
from config.state import config
|
||||
from constants import Arch, BASE_DISTROS, KUPFER_HTTPS, REPOS_CONFIG_FILE, REPOSITORIES
|
||||
from dictscheme import DictScheme, toml_inline_dicts, TomlPreserveInlineDictEncoder
|
||||
from utils import sha256sum
|
||||
|
||||
REPOS_KEY = 'repos'
|
||||
REMOTEURL_KEY = 'remote_url'
|
||||
LOCALONLY_KEY = 'local_only'
|
||||
OPTIONS_KEY = 'options'
|
||||
BASEDISTROS_KEY = 'base_distros'
|
||||
|
||||
_current_config: Optional[ReposConfigFile]
|
||||
|
||||
|
||||
class AbstrRepoConfig(DictScheme):
|
||||
options: Optional[dict[str, str]]
|
||||
_strip_hidden: ClassVar[bool] = True
|
||||
_sparse: ClassVar[bool] = True
|
||||
|
||||
|
||||
class BaseDistroRepo(AbstrRepoConfig):
|
||||
remote_url: Optional[str]
|
||||
|
||||
|
||||
class RepoConfig(AbstrRepoConfig):
|
||||
remote_url: Optional[Union[str, dict[Arch, str]]]
|
||||
local_only: Optional[bool]
|
||||
|
||||
|
||||
class BaseDistro(DictScheme):
|
||||
remote_url: Optional[str]
|
||||
repos: dict[str, BaseDistroRepo]
|
||||
|
||||
|
||||
class ReposConfigFile(DictScheme):
|
||||
remote_url: Optional[str]
|
||||
repos: dict[str, RepoConfig]
|
||||
base_distros: dict[Arch, BaseDistro]
|
||||
_path: Optional[str]
|
||||
_checksum: Optional[str]
|
||||
_strip_hidden: ClassVar[bool] = True
|
||||
_sparse: ClassVar[bool] = True
|
||||
|
||||
def __init__(self, d, **kwargs):
|
||||
super().__init__(d=d, **kwargs)
|
||||
self[REPOS_KEY] = self.get(REPOS_KEY, {})
|
||||
for repo_cls, defaults, repos, remote_url in [
|
||||
(RepoConfig, REPO_DEFAULTS, self.get(REPOS_KEY), d.get(REMOTEURL_KEY, None)),
|
||||
*[(BaseDistroRepo, BASE_DISTRO_DEFAULTS, _distro.repos, _distro.get(REMOTEURL_KEY, None)) for _distro in self.base_distros.values()],
|
||||
]:
|
||||
if repos is None:
|
||||
continue
|
||||
for name, repo in repos.items():
|
||||
_repo = dict(defaults | (repo or {})) # type: ignore[operator]
|
||||
if REMOTEURL_KEY not in repo and not repo.get(LOCALONLY_KEY, None):
|
||||
_repo[REMOTEURL_KEY] = remote_url
|
||||
repos[name] = repo_cls(_repo, **kwargs)
|
||||
|
||||
@staticmethod
|
||||
def parse_config(path: str) -> ReposConfigFile:
|
||||
try:
|
||||
with open(path, 'r') as fd:
|
||||
data = yaml.safe_load(fd)
|
||||
data['_path'] = path
|
||||
data['_checksum'] = sha256sum(path)
|
||||
return ReposConfigFile(data, validate=True)
|
||||
except Exception as ex:
|
||||
logging.error(f'Error parsing repos config at "{path}":\n{ex}')
|
||||
raise ex
|
||||
|
||||
def toToml(self, strip_hidden=None, sparse=None, encoder=TomlPreserveInlineDictEncoder()):
|
||||
d = self.toDict(strip_hidden=strip_hidden, sparse=sparse)
|
||||
for key in [REPOS_KEY]:
|
||||
if key not in d or not isinstance(d[key], Mapping):
|
||||
continue
|
||||
inline = {name: {k: toml_inline_dicts(v) for k, v in value.items()} for name, value in d[key].items()}
|
||||
logging.info(f"Inlined {key}: {inline}")
|
||||
d[key] = inline
|
||||
return toml.dumps(d, encoder=encoder)
|
||||
|
||||
|
||||
REPO_DEFAULTS = {
|
||||
LOCALONLY_KEY: None,
|
||||
REMOTEURL_KEY: None,
|
||||
OPTIONS_KEY: {
|
||||
'SigLevel': 'Never'
|
||||
},
|
||||
}
|
||||
|
||||
BASE_DISTRO_DEFAULTS = {
|
||||
REMOTEURL_KEY: None,
|
||||
OPTIONS_KEY: None,
|
||||
}
|
||||
|
||||
REPOS_CONFIG_DEFAULT = ReposConfigFile({
|
||||
'_path': '__DEFAULTS__',
|
||||
'_checksum': None,
|
||||
REMOTEURL_KEY: KUPFER_HTTPS,
|
||||
REPOS_KEY: {
|
||||
'kupfer_local': REPO_DEFAULTS | {
|
||||
LOCALONLY_KEY: True
|
||||
},
|
||||
**{
|
||||
r: deepcopy(REPO_DEFAULTS) for r in REPOSITORIES
|
||||
},
|
||||
},
|
||||
BASEDISTROS_KEY: {
|
||||
arch: {
|
||||
REMOTEURL_KEY: None,
|
||||
'repos': {
|
||||
k: {
|
||||
'remote_url': v
|
||||
} for k, v in arch_def['repos'].items()
|
||||
},
|
||||
} for arch, arch_def in BASE_DISTROS.items()
|
||||
},
|
||||
})
|
||||
|
||||
_current_config = None
|
||||
|
||||
|
||||
def get_repo_config(
|
||||
initialize_pkgbuilds: bool = False,
|
||||
repo_config_file: Optional[str] = None,
|
||||
) -> tuple[ReposConfigFile, bool]:
|
||||
global _current_config
|
||||
repo_config_file_default = os.path.join(config.get_path('pkgbuilds'), REPOS_CONFIG_FILE)
|
||||
if repo_config_file is None:
|
||||
repo_config_file_path = repo_config_file_default
|
||||
else:
|
||||
repo_config_file_path = repo_config_file
|
||||
config_exists = os.path.exists(repo_config_file_path)
|
||||
if not config_exists and _current_config is None:
|
||||
if initialize_pkgbuilds:
|
||||
from packages.pkgbuild import init_pkgbuilds
|
||||
init_pkgbuilds(update=False)
|
||||
return get_repo_config(initialize_pkgbuilds=False, repo_config_file=repo_config_file)
|
||||
if repo_config_file is not None:
|
||||
raise Exception(f"Requested repo config {repo_config_file} doesn't exist")
|
||||
logging.warning(f"{repo_config_file_path} doesn't exist, using built-in repo config defaults")
|
||||
_current_config = deepcopy(REPOS_CONFIG_DEFAULT)
|
||||
return _current_config, False
|
||||
changed = False
|
||||
if (not _current_config) or (config_exists and _current_config._checksum != sha256sum(repo_config_file_path)):
|
||||
if config_exists:
|
||||
conf = ReposConfigFile.parse_config(repo_config_file_path)
|
||||
else:
|
||||
conf = REPOS_CONFIG_DEFAULT
|
||||
changed = conf != (_current_config or {})
|
||||
if changed:
|
||||
_current_config = deepcopy(conf)
|
||||
else:
|
||||
logging.debug("Repo config: Cache hit!")
|
||||
assert _current_config
|
||||
return _current_config, changed
|
||||
|
||||
|
||||
def get_repos(**kwargs) -> list[RepoConfig]:
|
||||
config, _ = get_repo_config(**kwargs)
|
||||
return list(config.repos.values())
|
||||
3
docs/.gitignore
vendored
@@ -2,6 +2,3 @@
|
||||
.doctrees
|
||||
html
|
||||
source/cli
|
||||
checkouts
|
||||
versions
|
||||
archived
|
||||
|
||||
@@ -1,72 +1,16 @@
|
||||
buildargs := -b dirhtml -aE source
|
||||
buildargs := -b dirhtml -aE source html
|
||||
|
||||
.PHONY: cleanbuild clean serve serve_versions versions versions_git versions_index
|
||||
.NOTINTERMEDIATE:
|
||||
.PRECIOUS: versions/index.html versions/%/index.html checkouts/%/docs/html/index.html archived/%.tar.gz
|
||||
|
||||
BRANCHES := main dev
|
||||
TAGS := $(shell git tag)
|
||||
FILTERTED_TAGS := $(foreach tag,$(TAGS),$(shell if [[ -n "$$(git log --max-count=1 --oneline "$(tag)" -- .)" ]]; then echo "$(tag)"; fi))
|
||||
VERSIONS := $(BRANCHES) $(FILTERTED_TAGS)
|
||||
.PHONY: cleanbuild clean
|
||||
|
||||
cleanbuild:
|
||||
@$(MAKE) clean
|
||||
@$(MAKE) html
|
||||
@make clean
|
||||
@make html
|
||||
|
||||
clean:
|
||||
rm -rf html source/cli .buildinfo .doctrees versions checkouts
|
||||
rm -rf html source/cli
|
||||
|
||||
html:
|
||||
sphinx-build $(SPHINXARGS) $(buildargs) html
|
||||
sphinx-build $(buildargs)
|
||||
|
||||
serve: html
|
||||
cd html && python -m http.server 9999
|
||||
|
||||
checkouts/%/docs/html/index.html:
|
||||
@mkdir -p checkouts
|
||||
@# use backslashed multi-line cmd because otherwise variables will be lost
|
||||
@branch="$$(echo "$(@D)" | sed 's|^checkouts/||g;s|/docs/html$$||g')" && \
|
||||
ref="$$branch" && \
|
||||
if ! git log --max-count=1 --oneline "$$branch" >/dev/null 2>/dev/null ; then \
|
||||
commit="$$(git ls-remote origin refs/{tags,heads}/"$$branch" | cut -f 1)" ; \
|
||||
[[ -n "$$commit" ]] && echo "found commit $$commit for $$branch" >&2 && \
|
||||
ref="$$commit" && git branch -f "$$branch" "$$ref" ; \
|
||||
fi && \
|
||||
[[ -n "$$(git log --max-count=1 --oneline "$$ref" -- .)" ]] || \
|
||||
(echo "ERROR: branch '$$branch' seems to have no docs/ dir, checked ref '$$ref'" >&2 && exit 1) && \
|
||||
checkout="checkouts/$$branch" && \
|
||||
ver="$$(echo "$$branch" | sed 's|^v\([0-9]\)|\1|g')" && \
|
||||
set -x && \
|
||||
([[ -e "$$checkout/.git" ]] || git clone .. "$$checkout" ) && \
|
||||
(! [[ -e "$$checkout/docs/source/conf.py" ]] || echo "version = '$$ver'" >> "$$checkout/docs/source/conf.py") && \
|
||||
$(MAKE) -C "$$checkout/docs" SPHINXARGS="-D version=$$ver"
|
||||
|
||||
archived/%.tar.gz: checkouts/%/docs/html/index.html
|
||||
mkdir -p archived
|
||||
tar -C "checkouts/$*/docs/html" -czf "$@" .
|
||||
|
||||
versions/%/index.html: archived/%.tar.gz
|
||||
@mkdir -p "$(@D)"
|
||||
@echo "working on version '$*'"
|
||||
tar -xf "archived/$*.tar.gz" -C "$(@D)"
|
||||
@# ensure index file exists and update its timestamp for Make's dependency detection
|
||||
[[ -e "$(@)" ]] && touch "$(@)"
|
||||
|
||||
versions/versions.css: versjon/versions.css
|
||||
@mkdir -p versions
|
||||
cp versjon/versions.css versions/
|
||||
|
||||
versions_git:
|
||||
@$(MAKE) $(patsubst %, versions/%/index.html, $(VERSIONS))
|
||||
|
||||
versions/index.html: $(sort $(wildcard versions/*/index.html))
|
||||
rm -rf versions/stable
|
||||
@cd versions && set -x && versjon --stable-version main --user_templates ../versjon
|
||||
@# ensure the global index.html exists and is newer than each version's index.html
|
||||
[[ -e "$(@)" ]] && touch "$(@)"
|
||||
|
||||
versions: versions_git versions/versions.css
|
||||
@$(MAKE) versions/index.html
|
||||
|
||||
serve_versions: versions/index.html
|
||||
cd versions && python -m http.server 9888
|
||||
(cd html && python -m http.server 9999)
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
sphinx-click
|
||||
myst-parser
|
||||
# furo sphinx theme
|
||||
furo
|
||||
versjon<=2.3.0
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
# CLI Interface
|
||||
|
||||
```{eval-rst}
|
||||
.. click:: main:cli
|
||||
:nested: none
|
||||
:prog: kupferbootstrap
|
||||
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
% generated by cmd.rst
|
||||
|
||||
```{toctree}
|
||||
:glob: true
|
||||
|
||||
cli/*
|
||||
```
|
||||
17
docs/source/cli.rst
Normal file
@@ -0,0 +1,17 @@
|
||||
#############
|
||||
CLI Interface
|
||||
#############
|
||||
|
||||
.. click:: main:cli
|
||||
:nested: none
|
||||
:prog: kupferbootstrap
|
||||
|
||||
|
||||
Commands
|
||||
========
|
||||
|
||||
.. generated by cmd.rst
|
||||
.. toctree::
|
||||
:glob:
|
||||
|
||||
cli/*
|
||||
@@ -1,23 +1,21 @@
|
||||
---
|
||||
nosearch: true
|
||||
orphan: true
|
||||
---
|
||||
:orphan:
|
||||
:nosearch:
|
||||
|
||||
only used to trigger builds of the submodule docs!
|
||||
|
||||
```{eval-rst}
|
||||
.. autosummary::
|
||||
:toctree: cli
|
||||
:template: command.rst
|
||||
:recursive:
|
||||
|
||||
binfmt
|
||||
boot
|
||||
cache
|
||||
chroot
|
||||
config
|
||||
devices
|
||||
flavours
|
||||
flash
|
||||
forwarding
|
||||
image
|
||||
net
|
||||
packages
|
||||
```
|
||||
ssh
|
||||
telnet
|
||||
|
||||
@@ -5,9 +5,7 @@ sys.path.insert(0, os.path.abspath('../..'))
|
||||
extensions = [
|
||||
'sphinx_click',
|
||||
'sphinx.ext.autosummary', # Create neat summary tables
|
||||
'myst_parser'
|
||||
]
|
||||
myst_all_links_external = True
|
||||
templates_path = ['templates']
|
||||
project = 'Kupfer👢strap'
|
||||
html_title = 'Kupferbootstrap'
|
||||
@@ -20,13 +18,4 @@ html_theme_options = {
|
||||
"globaltoc_collapse": True,
|
||||
"light_logo": "kupfer-black-transparent.svg",
|
||||
"dark_logo": "kupfer-white-transparent.svg",
|
||||
"light_css_variables": {
|
||||
"color-brand-primary": "#882a1a",
|
||||
"color-brand-content": "#882a1a",
|
||||
},
|
||||
"dark_css_variables": {
|
||||
"color-brand-primary": "#eba38d",
|
||||
"color-brand-content": "#eba38d",
|
||||
"color-problematic": "#ff7564",
|
||||
},
|
||||
}
|
||||
|
||||
134
docs/source/config.rst
Normal file
@@ -0,0 +1,134 @@
|
||||
#############
|
||||
Configuration
|
||||
#############
|
||||
|
||||
|
||||
Kupferbootstrap uses `toml <https://en.wikipedia.org/wiki/TOML>`_ for its configuration file.
|
||||
|
||||
The file can either be edited manually or managed via the :doc:`cli/config` subcommand.
|
||||
|
||||
You can quickly generate a default config by running :code:`kupferbootstrap config init -N`.
|
||||
|
||||
|
||||
File Location
|
||||
#############
|
||||
|
||||
The configuration is stored in ``~/.config/kupfer/kupferbootstrap.toml``, where ``~`` is your user's home folder.
|
||||
|
||||
Kupferbootstrap needs to create a number of folders, e.g. to download ``PKGBUILDs.git`` and store binary packages.
|
||||
By default, all of those folders live inside ``~/.cache/kupfer/``.
|
||||
|
||||
See also the ``[paths]`` section in your config.
|
||||
|
||||
Sections
|
||||
########
|
||||
|
||||
A config file is split into sections like so:
|
||||
|
||||
.. code-block:: toml
|
||||
|
||||
[pkgbuilds]
|
||||
git_repo = "https://gitlab.com/kupfer/packages/pkgbuilds.git"
|
||||
git_branch = "dev"
|
||||
|
||||
[pacman]
|
||||
parallel_downloads = 3
|
||||
|
||||
|
||||
Here, we have two sections: ``pkgbuilds`` and ``pacman``.
|
||||
|
||||
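As an editorial aside (not part of this file's diff), here is a minimal sketch of reading
such a config with Python's standard ``tomllib`` parser (Python 3.11+); Kupferbootstrap's
real config handling lives in its ``config`` module and does considerably more:

.. code-block:: python

    import tomllib  # stdlib TOML parser, Python >= 3.11

    # tomllib only accepts binary file objects
    with open("kupferbootstrap.toml", "rb") as fd:
        cfg = tomllib.load(fd)

    # every [section] becomes a nested dict
    print(cfg["pkgbuilds"]["git_branch"])       # e.g. "dev"
    print(cfg["pacman"]["parallel_downloads"])  # e.g. 3
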
Flavours
|
||||
########
|
||||
|
||||
Flavours are preset collections of software and functionality to enable,
|
||||
e.g. desktop environments like `Gnome <https://en.wikipedia.org/wiki/GNOME>`_
|
||||
and `Phosh <https://en.wikipedia.org/wiki/Phosh>`_.
|
||||
|
||||
|
||||
Profiles
|
||||
########
|
||||
|
||||
The last section and currently the only one with subsections is the ``profiles`` section.
|
||||
|
||||
A profile is the configuration of a specific device image. It specifies (amongst others):
|
||||
|
||||
* the device model
|
||||
* the flavour (desktop environment)
|
||||
* the host- and user name
|
||||
* extra packages to install
|
||||
|
||||
Using a profile's ``parent`` key,
|
||||
you can inherit settings from another profile.
|
||||
|
||||
This allows you to easily keep a number of slight variations of the same target profile around
|
||||
without the need to constantly modify your Kupferbootstrap configuration file.
|
||||
|
||||
You can easily create new profiles with
|
||||
`kupferbootstrap config profile init <../cli/config/#kupferbootstrap-config-profile-init>`_.
|
||||
|
||||
Here's an example:
|
||||
|
||||
.. code:: toml
|
||||
|
||||
[profiles]
|
||||
current = "graphical"
|
||||
|
||||
[profiles.default]
|
||||
parent = ""
|
||||
device = "oneplus-enchilada"
|
||||
flavour = "barebone"
|
||||
pkgs_include = [ "wget", "rsync", "nano", "tmux", "zsh", "pv", ]
|
||||
pkgs_exclude = []
|
||||
hostname = "kupferphone"
|
||||
username = "prawn"
|
||||
size_extra_mb = 800
|
||||
|
||||
[profiles.graphical]
|
||||
parent = "default"
|
||||
flavour = "phosh"
|
||||
pkgs_include = [ "firefox", "tilix", "gnome-tweaks" ]
|
||||
size_extra_mb = "+3000"
|
||||
|
||||
[profiles.hades]
|
||||
parent = "graphical"
|
||||
flavour = "phosh"
|
||||
hostname = "hades"
|
||||
|
||||
[profiles.recovery]
|
||||
parent = "default"
|
||||
flavour = "debug-shell"
|
||||
|
||||
[profiles.beryllium]
|
||||
parent = "graphical"
|
||||
device = "xiaomi-beryllium-ebbg"
|
||||
flavour = "gnome"
|
||||
hostname = "pocof1"
|
||||
|
||||
|
||||
|
||||
The ``current`` key in the ``profiles`` section controls which profile gets used by Kupferbootstrap by default.
|
||||
|
||||
The first subsection (``profiles.default``) describes the `default` profile
|
||||
which gets created by `config init <../cli/config/#kupferbootstrap-config-init>`_.
|
||||
|
||||
Next, we have a `graphical` profile that defines a couple of graphical programs for all but the `recovery` profile,
|
||||
since that doesn't have a GUI.
|
||||
|
||||
``size_extra_mb``
|
||||
-----------------
|
||||
|
||||
Note how ``size_extra_mb`` can either be a plain integer (``800``) or a string,
|
||||
optionally leading with a plus sign (``+3000``),
|
||||
which instructs Kupferbootstrap to add the value to the parent profile's ``size_extra_mb``.
|
||||
|
||||
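As a hedged illustration of these semantics (the helper below is hypothetical and not part
of Kupferbootstrap's code), the relative form could be resolved roughly like this:

.. code-block:: python

    def resolve_size_extra_mb(parent_value: int, child_value) -> int:
        """Hypothetical helper: resolve a child profile's size_extra_mb."""
        if isinstance(child_value, str) and child_value.startswith("+"):
            # "+3000" means: add 3000 MB on top of the parent's value
            return parent_value + int(child_value[1:])
        # a plain integer (or numeric string) replaces the parent's value
        return int(child_value)

    assert resolve_size_extra_mb(800, "+3000") == 3800
    assert resolve_size_extra_mb(800, 1200) == 1200
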
``pkgs_include`` / ``pkgs_exclude``
|
||||
-----------------------------------
|
||||
|
||||
Like ``size_extra_mb``, ``pkgs_include`` will be merged with the parent profile's ``pkgs_include``.
|
||||
|
||||
To exclude unwanted packages from being inherited from a parent profile, use ``pkgs_exclude`` in the child profile.
|
||||
|
||||
.. hint::
|
||||
``pkgs_exclude`` has no influence on Pacman's dependency resolution.
|
||||
It only blocks packages during image build that would usually be explicitly installed
|
||||
due to being listed in a parent profile or the selected flavour.
|
||||
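Again purely as an illustrative sketch (names and exact merge order are assumptions, not
Kupferbootstrap's implementation), the merge described above behaves along these lines:

.. code-block:: python

    def merge_profile_pkgs(parent_include: list[str], child_include: list[str],
                           child_exclude: list[str]) -> list[str]:
        """Hypothetical helper: merge explicit package lists across profiles."""
        merged = list(parent_include)
        merged += [p for p in child_include if p not in merged]
        # pkgs_exclude only filters this explicit list; pacman's dependency
        # resolution is unaffected
        return [p for p in merged if p not in child_exclude]

    assert merge_profile_pkgs(["wget", "nano"], ["firefox"], ["nano"]) == ["wget", "firefox"]
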
@@ -1,11 +0,0 @@
|
||||
# Kupferbootstrap Documentation
|
||||
|
||||
This is the documentation for [Kupferbootstrap](https://gitlab.com/kupfer/kupferbootstrap),
|
||||
a tool to build and flash packages and images for the [Kupfer](https://gitlab.com/kupfer/) mobile Linux distro.
|
||||
|
||||
## Documentation pages
|
||||
|
||||
```{toctree}
|
||||
usage/index
|
||||
cli
|
||||
```
|
||||
16
docs/source/index.rst
Normal file
@@ -0,0 +1,16 @@
|
||||
#############################
|
||||
Kupferbootstrap Documentation
|
||||
#############################
|
||||
|
||||
This is the documentation for `Kupferbootstrap <https://gitlab.com/kupfer/kupferbootstrap>`_,
|
||||
a tool to build and flash packages and images for the `Kupfer <https://gitlab.com/kupfer/>`_ mobile Linux distro.
|
||||
|
||||
|
||||
Documentation pages
|
||||
===================
|
||||
|
||||
.. toctree::
|
||||
|
||||
install
|
||||
config
|
||||
cli
|
||||
35
docs/source/install.rst
Normal file
@@ -0,0 +1,35 @@
|
||||
############
|
||||
Installation
|
||||
############
|
||||
|
||||
|
||||
#.
|
||||
Install Python 3, Docker, and git.
|
||||
|
||||
On Arch: ``pacman -S python docker git --needed --noconfirm``
|
||||
|
||||
.. Hint::
|
||||
After installing Docker you will have to add your user to the ``docker`` group:
|
||||
|
||||
``sudo usermod -aG docker "$(whoami)"``
|
||||
|
||||
Then restart your desktop session for the new group to take effect.
|
||||
|
||||
#. Pick which Kupferbootstrap branch to clone: usually either ``main`` or ``dev``
|
||||
|
||||
#. Clone the repository: ``git clone -b INSERT_BRANCHNAME_HERE https://gitlab.com/kupfer/kupferbootstrap``
|
||||
|
||||
#. Change into the folder: ``cd kupferbootstrap``
|
||||
|
||||
#.
|
||||
Install python dependencies: ``pip3 install -r requirements.txt``
|
||||
|
||||
.. Note::
|
||||
Most of our python dependencies are available as distro packages on most distros;
sadly, the selection available on Arch is incomplete.
|
||||
|
||||
See ``requirements.txt`` for the list of required python packages.
|
||||
|
||||
#. Symlink ``kupferbootstrap`` into your ``$PATH``: ``sudo ln -s "$(pwd)/bin/kupferbootstrap" /usr/local/bin/``
|
||||
|
||||
#. You should now be able to run ``kupferbootstrap --help``!
|
||||
@@ -1,6 +1,5 @@
|
||||
.. title: {{fullname}}
|
||||
|
||||
.. click:: {% if fullname == 'main' %}main:cli{% else %}{{fullname}}.cli:cmd_{{fullname}}{% endif %}
|
||||
.. click:: {% if fullname == 'main' %}main:cli{% else %}{{fullname}}:cmd_{{fullname}}{% endif %}
|
||||
:prog: kupferbootstrap {{fullname}}
|
||||
:nested: full
|
||||
|
||||
|
||||
@@ -1,125 +0,0 @@
|
||||
# Configuration
|
||||
|
||||
Kupferbootstrap uses [toml](https://en.wikipedia.org/wiki/TOML) for its configuration file.
|
||||
|
||||
The file can either be edited manually or managed via the [`kupferbootstrap config`](../../cli/config) subcommand.
|
||||
|
||||
```{hint}
|
||||
You can quickly generate a default config by running {code}`kupferbootstrap config init -N`.
|
||||
|
||||
For an interactive dialogue, omit the `-N`.
|
||||
```
|
||||
|
||||
## File Location
|
||||
|
||||
The configuration is stored in `~/.config/kupfer/kupferbootstrap.toml`, where `~` is your user's home folder.
|
||||
|
||||
Kupferbootstrap needs to create a number of folders, e.g. to download `PKGBUILDs.git` and store binary packages.
|
||||
By default, all of those folders live inside `~/.cache/kupfer/`.
|
||||
|
||||
See also the `[paths]` section in your config.
|
||||
|
||||
## Sections
|
||||
|
||||
A config file is split into sections like so:
|
||||
|
||||
```toml
|
||||
[pkgbuilds]
|
||||
git_repo = "https://gitlab.com/kupfer/packages/pkgbuilds.git"
|
||||
git_branch = "dev"
|
||||
|
||||
[pacman]
|
||||
parallel_downloads = 3
|
||||
```
|
||||
|
||||
Here, we have two sections: `pkgbuilds` and `pacman`.
|
||||
|
||||
## Flavours
|
||||
|
||||
Flavours are preset collections of software and functionality to enable,
|
||||
e.g. desktop environments like [Gnome](https://en.wikipedia.org/wiki/GNOME)
|
||||
and [Phosh](https://en.wikipedia.org/wiki/Phosh).
|
||||
|
||||
## Profiles
|
||||
|
||||
The last section and currently the only one with subsections is the `profiles` section.
|
||||
|
||||
A profile is the configuration of a specific device image. It specifies (amongst others):
|
||||
|
||||
- the device model
|
||||
- the flavour (desktop environment)
|
||||
- the host- and user name
|
||||
- extra packages to install
|
||||
|
||||
Using a profile's `parent` key,
|
||||
you can inherit settings from another profile.
|
||||
|
||||
This allows you to easily keep a number of slight variations of the same target profile around
|
||||
without the need to constantly modify your Kupferbootstrap configuration file.
|
||||
|
||||
You can easily create new profiles with
|
||||
[kupferbootstrap config profile init](../../cli/config/#kupferbootstrap-config-profile-init).
|
||||
|
||||
Here's an example:
|
||||
|
||||
```toml
|
||||
[profiles]
|
||||
current = "graphical"
|
||||
|
||||
[profiles.default]
|
||||
parent = ""
|
||||
device = "sdm845-oneplus-enchilada"
|
||||
flavour = "barebone"
|
||||
pkgs_include = [ "wget", "rsync", "nano", "tmux", "zsh", "pv", ]
|
||||
pkgs_exclude = []
|
||||
hostname = "kupferphone"
|
||||
username = "prawn"
|
||||
size_extra_mb = 800
|
||||
|
||||
[profiles.graphical]
|
||||
parent = "default"
|
||||
flavour = "phosh"
|
||||
pkgs_include = [ "firefox", "tilix", "gnome-tweaks" ]
|
||||
size_extra_mb = "+3000"
|
||||
|
||||
[profiles.hades]
|
||||
parent = "graphical"
|
||||
flavour = "phosh"
|
||||
hostname = "hades"
|
||||
|
||||
[profiles.recovery]
|
||||
parent = "default"
|
||||
flavour = "debug-shell"
|
||||
|
||||
[profiles.beryllium]
|
||||
parent = "graphical"
|
||||
device = "sdm845-xiaomi-beryllium-ebbg"
|
||||
flavour = "gnome"
|
||||
hostname = "pocof1"
|
||||
```
|
||||
|
||||
The `current` key in the `profiles` section controls which profile gets used by Kupferbootstrap by default.
|
||||
|
||||
The first subsection (`profiles.default`) describes the `default` profile
|
||||
which gets created by [`kupferbootstrap config init`](../../cli/config/#kupferbootstrap-config-init).
|
||||
|
||||
Next, we have a `graphical` profile that defines a couple of graphical programs for all but the `recovery` profile,
|
||||
since that doesn't have a GUI.
|
||||
|
||||
### `size_extra_mb`
|
||||
|
||||
Note how `size_extra_mb` can either be a plain integer (`800`) or a string,
|
||||
optionally leading with a plus sign (`+3000`),
|
||||
which instructs Kupferbootstrap to add the value to the parent profile's `size_extra_mb`.
|
||||
|
||||
### `pkgs_include` / `pkgs_exclude`
|
||||
|
||||
Like `size_extra_mb`, `pkgs_include` will be merged with the parent profile's `pkgs_include`.
|
||||
|
||||
To exclude unwanted packages from being inherited from a parent profile, use `pkgs_exclude` in the child profile.
|
||||
|
||||
```{hint}
|
||||
`pkgs_exclude` has no influence on Pacman's dependency resolution.
|
||||
It only blocks packages during image build that would usually be explicitly installed
|
||||
due to being listed in a parent profile or the selected flavour.
|
||||
```
|
||||
@@ -1,39 +0,0 @@
|
||||
# FAQ
|
||||
|
||||
|
||||
```{contents} Table of Contents
|
||||
:class: this-will-duplicate-information-and-it-is-still-useful-here
|
||||
:depth: 3
|
||||
```
|
||||
|
||||
|
||||
## Which devices are currently supported?
|
||||
|
||||
Currently very few!
|
||||
See [the `devices` repo](https://gitlab.com/kupfer/packages/pkgbuilds/-/tree/dev/device). We use the same codenames as [postmarketOS](https://wiki.postmarketos.org/wiki/Devices) (although we prefix them with the SoC)
|
||||
|
||||
|
||||
## How to port a new device or package?
|
||||
|
||||
See [Porting](../porting)
|
||||
|
||||
## How to build a specific package
|
||||
|
||||
See also: The full [`kupferbootstrap packages build` docs](../../cli/packages#kupferbootstrap-packages-build)
|
||||
|
||||
### Example
|
||||
|
||||
For rebuilding `kupfer-config` and `crossdirect`, defaulting to your device's architecture
|
||||
|
||||
```sh
|
||||
kupferbootstrap packages build [--force] [--arch $target_arch] kupfer-config crossdirect
|
||||
```
|
||||
|
||||
|
||||
### By package path
|
||||
You can also use a path snippet (`$repo/$pkgbase`) to the PKGBUILD folder as seen inside your pkgbuilds.git:
|
||||
|
||||
```sh
|
||||
kupferbootstrap packages build [--force] main/kupfer-config cross/crossdirect
|
||||
```
|
||||
|
||||
@@ -1,9 +0,0 @@
# Usage

```{toctree}
quickstart
faq
install
config
porting
```
@@ -1,32 +0,0 @@
|
||||
# Installation
|
||||
|
||||
1. Install Python 3, Docker, and git.
|
||||
|
||||
On Arch: `pacman -S python docker git --needed --noconfirm`
|
||||
|
||||
```{Hint}
|
||||
After installing Docker you will have to add your user to the `docker` group:
|
||||
|
||||
`sudo usermod -aG docker "$(whoami)"`
|
||||
|
||||
Then restart your desktop session for the new group to take effect.
|
||||
```
|
||||
|
||||
2. Pick which Kupferbootstrap branch to clone: usually either `main` or `dev`
|
||||
|
||||
3. Clone the repository: `git clone -b INSERT_BRANCHNAME_HERE https://gitlab.com/kupfer/kupferbootstrap`
|
||||
|
||||
4. Change into the folder: `cd kupferbootstrap`
|
||||
|
||||
5. Install python dependencies: `pip3 install -r requirements.txt`
|
||||
|
||||
```{Note}
|
||||
Most of our python dependencies are available as distro packages on most distros;
sadly, the selection available on Arch is incomplete.
|
||||
|
||||
See `requirements.txt` for the list of required python packages.
|
||||
```
|
||||
|
||||
6. Symlink `kupferbootstrap` into your `$PATH`: `sudo ln -s "$(pwd)/bin/kupferbootstrap" /usr/local/bin/`
|
||||
|
||||
7. You should now be able to run `kupferbootstrap --help`!
|
||||
@@ -1,94 +0,0 @@
|
||||
# Porting
|
||||
## Porting devices
|
||||
|
||||
### Homework
|
||||
Before you can get started porting a device, you'll need to do some research:
|
||||
|
||||
1. Familiarize yourself with git basics.
|
||||
1. Familiarize yourself with Arch Linux packaging, i.e. `PKGBUILD`s and `makepkg`
|
||||
1. Familiarize yourself with the postmarketOS port of the device.
|
||||
```{warning}
|
||||
If there is no postmarketOS port yet, you'll probably need to get deep into kernel development.
|
||||
We suggest [starting with a port to pmOS](https://wiki.postmarketos.org/wiki/Porting_to_a_new_device) then, especially if you're not familiar with the process already.
|
||||
```
|
||||
|
||||
### Porting
|
||||
1. Navigate to your pkgbuilds checkout
|
||||
1. Follow the [general package porting guidelines](#porting-packages) to create a device-, kernel- and probably also a firmware-package for the device and SoC. Usually this roughly means porting the postmarketOS APKBUILDs to our PKGBUILD scheme.
|
||||
You can get inspiration by comparing existing Kupfer ports (e.g. one of the SDM845 devices) to the [postmarketOS packages](https://gitlab.com/postmarketOS/pmaports/-/tree/master/device) for that device.
|
||||
Usually you should start out by copying and then customizing the Kupfer packages for a device that's as similar to yours as possible, i.e. uses the same or a related SoC, if something like that is already available in Kupfer.
|
||||
```{hint} Package Repos:
|
||||
Device packages belong into `device/`, kernels into `linux/` and firmware into `firmware/`.
|
||||
```
|
||||
1. When submitting your MR, please include some information:
|
||||
- what you have found to be working, broken, and not tested (and why)
|
||||
- any necessary instructions for testing
|
||||
- whether you'd be willing to maintain the device long-term (test kernel upgrades, submit device package updates, etc.)
|
||||
|
||||
|
||||
### Gotchas
|
||||
|
||||
Please be aware of these gotchas:
|
||||
- As of now, Kupfer only really supports platforms using Android's `aboot` bootloader, i.e. ex-Android phones. In order to support other boot modes (e.g. uboot on the Librem5 and Pine devices), we'll need to port and switch to postmarketOS's [boot-deploy](https://gitlab.com/postmarketOS/boot-deploy) first and add support for EFI setups to Kupferbootstrap.
|
||||
|
||||
|
||||
## Porting packages
|
||||
|
||||
### Homework
|
||||
Before you can get started, you'll need to do some research:
|
||||
|
||||
1. Familiarize yourself with git basics.
|
||||
1. Familiarize yourself with Arch Linux packaging, i.e. `PKGBUILD`s and `makepkg`
|
||||
|
||||
### Development
|
||||
|
||||
```{warning}
|
||||
Throughout the process, use git to version your changes.
|
||||
- Don't procrastinate using git or committing until you're "done" or "have got something working", you'll regret it.
|
||||
- Don't worry about a "clean" git history while you're developing; we can squash it up later.
|
||||
- \[Force-]Push your changes regularly, just like committing. Don't wait for perfection.
|
||||
```
|
||||
1. Create a new git branch for your package locally.
|
||||
```{hint}
|
||||
It might be a good idea to get into the habit of prefixing branch names with \[a part of] your username and a slash like so:
|
||||
`myNickname/myFeatureName`
|
||||
This makes it easier to work in the same remote repo with multiple people.
|
||||
```
|
||||
1.
|
||||
```{note}
|
||||
The pkgbuilds git repo contains multiple package repositories, represented by folders at the top level (`main`, `cross`, `phosh`, etc.).
|
||||
```
|
||||
Try to choose a sensible package repo for your new packages and create new folders for each `pkgbase` inside the repo folder.
|
||||
1. Navigate into the folder of the new package and create a new `PKGBUILD`; fill it with life!
|
||||
1. **`_mode`**: Add the build mode at the top of the PKGBUILD.
|
||||
```{hint}
|
||||
If you're unsure what to pick, go with `_mode=host`. It'll use `crossdirect` to get speeds close to proper cross-compiling.
|
||||
```
|
||||
This determines whether the package is built in a foreign-arch chroot (`_mode=host`) executed with qemu-user, or via real cross-compilation (`_mode=cross`) from a host-architecture chroot. The package's build tooling has to specifically support the latter, so it's mostly useful for kernels and uncompiled packages.
|
||||
1. **`_nodeps`**: (Optional) If your package doesn't require its listed dependencies to build
|
||||
(usually because you're packaging a meta-package or only configs or scripts)
|
||||
you can add `_nodeps=true` as the next line after the `_mode=` line to speed up packaging.
|
||||
`makedeps` are still installed anyway.
|
||||
1. Test building it with `kupferbootstrap packages build $pkgbname`
|
||||
1. For any files and git repos downloaded by your PKGBUILD,
|
||||
add them to a new `.gitignore` file in the same directory as your `PKGBUILD`.
|
||||
```{hint}
|
||||
Don't forget to `git add` the new `.gitignore` file!
|
||||
```
|
||||
1. Run `kupferbootstrap packages check` to make sure the formatting for your PKGBUILDs is okay.
|
||||
```{warning}
|
||||
This is **not** optional. MRs with failing CI will **not** be merged.
|
||||
```
|
||||
|
||||
### Pushing
|
||||
1. Fork the Kupfer pkgbuilds repo on Gitlab using the Fork button
|
||||
1. Add your fork's **SSH** URI to your local git repo as a **new remote**: `git remote add fork git@gitlab...`
|
||||
1. `git push -u fork $branchname` it
|
||||
|
||||
### Submitting the MR
|
||||
When you're ready, open a Merge Request on the Kupfer pkgbuilds repo.
|
||||
|
||||
```{hint}
|
||||
Prefix the MR title with `Draft: ` to indicate a Work In Progress state.
|
||||
```
|
||||
|
||||
@@ -1,9 +0,0 @@
# Quickstart

1. [Install](../install) Kupferbootstrap
1. [Configure](../config) it: `kupferbootstrap config init`
1. [Update your PKGBUILDs + SRCINFO cache](../../cli/packages#kupferbootstrap-packages-update): `kupferbootstrap packages update`
1. [Build an image](../../cli/image#kupferbootstrap-image-build): `kupferbootstrap image build`
1. [Flash the image](../../cli/image#kupferbootstrap-image-flash): `kupferbootstrap image flash abootimg && kupferbootstrap image flash full userdata`

See also: [Frequently Asked Questions](../faq)
@@ -1,58 +0,0 @@
|
||||
{# FORMAT_VERSION #}
|
||||
|
||||
{% macro format_version(version) %}
|
||||
{% if page in version.html_files %}
|
||||
{% set version_path = page_root + docs_path[version.name] + "/" + page %}
|
||||
{% else %}
|
||||
{% set version_path = page_root + docs_path[version.name] %}
|
||||
{% endif %}
|
||||
{% if current == version.name %}
|
||||
<strong>
|
||||
<dd><a href="{{ version_path }}">{{ version.name }}</a></dd>
|
||||
</strong>
|
||||
{% else %}
|
||||
<dd><a href="{{ version_path }}">{{ version.name }}</a></dd>
|
||||
{% endif %}
|
||||
{% endmacro %}
|
||||
|
||||
<div id="versjon-overlay">
|
||||
<button type="button" class="versjon">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" id="branch-icon" class="ionicon" viewBox="0 0 512 512">
|
||||
<!-- Taken from Ionic, MIT licensed. Copyright (c) 2015-present Ionic (http://ionic.io/) -->
|
||||
<title>Git Branch</title><circle cx="160" cy="96" r="48" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/><circle cx="160" cy="416" r="48" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/><path fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32" d="M160 368V144"/><circle cx="352" cy="160" r="48" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/><path d="M352 208c0 128-192 48-192 160" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/>
|
||||
</svg>
|
||||
Version: {{current}}
|
||||
</button>
|
||||
<div class="versjon-content">
|
||||
<div class="versjon-content-inner">
|
||||
<dl>
|
||||
<dl>
|
||||
<dt>Branches</dt>
|
||||
{% for version in other %}
|
||||
{{ format_version(version) | indent(16) }}
|
||||
{% endfor %}
|
||||
</dl>
|
||||
<dt>Versions</dt>
|
||||
{% for version in semver %}
|
||||
{{ format_version(version) | indent(16) }}
|
||||
{% endfor %}
|
||||
</dl>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<script>
|
||||
var coll = document.getElementsByClassName("versjon");
|
||||
var i;
|
||||
|
||||
for (i = 0; i < coll.length; i++) {
|
||||
coll[i].addEventListener("click", function () {
|
||||
this.classList.toggle("active");
|
||||
var content = this.nextElementSibling;
|
||||
if (content.style.maxHeight) {
|
||||
content.style.maxHeight = null;
|
||||
} else {
|
||||
content.style.maxHeight = content.scrollHeight + "px";
|
||||
}
|
||||
});
|
||||
}
|
||||
</script>
|
||||
@@ -1 +0,0 @@
<link href="{{ page_root }}versions.css" rel="stylesheet" type="text/css">
@@ -1,11 +0,0 @@
|
||||
{% if stable and (stable.name|default("")) != current %}
|
||||
{% if page in stable.html_files %}
|
||||
{% set stable_path = page_root + docs_path[stable.name] + "/" + page %}
|
||||
{% else %}
|
||||
{% set stable_path = page_root + docs_path[stable.name] %}
|
||||
{% endif %}
|
||||
<p class="versjon-{% if is_semver %}old{% else %}dev{% endif %}-warning">
|
||||
<strong>Warning:</strong> These docs are for version <b>{{current}}</b>. The docs for the latest stable version are at
|
||||
<b> <a href="{{ stable_path }}">{{ stable.name }}</a> </b>.
|
||||
</p>
|
||||
{% endif %}
|
||||
@@ -1,99 +0,0 @@
|
||||
.versjon {
|
||||
cursor: pointer;
|
||||
padding: 10px;
|
||||
width: 100%;
|
||||
border: none;
|
||||
text-align: left;
|
||||
outline: none;
|
||||
font-size: 15px;
|
||||
background: var(--color-code-background);
|
||||
color: var(--color-code-foreground);
|
||||
transition: background-color 0.1s linear;
|
||||
}
|
||||
|
||||
.versjon:hover {
|
||||
background-color: var(--color-highlighted-background);
|
||||
}
|
||||
|
||||
.versjon:after {
|
||||
content: '\002B';
|
||||
font-weight: bold;
|
||||
float: right;
|
||||
margin-left: 5px;
|
||||
}
|
||||
|
||||
.versjon:active:after {
|
||||
content: "\2212";
|
||||
}
|
||||
|
||||
.versjon-content {
|
||||
max-height: 0;
|
||||
overflow: hidden;
|
||||
transition: max-height 0.2s ease-out;
|
||||
}
|
||||
|
||||
.versjon-content-inner {
|
||||
padding: 10px 18px
|
||||
}
|
||||
|
||||
#versjon-overlay {
|
||||
position: fixed;
|
||||
z-index: 100;
|
||||
bottom: 0px;
|
||||
right: 0px;
|
||||
width: 250px;
|
||||
background: var(--color-code-background);
|
||||
max-height: 100%;
|
||||
overflow: scroll;
|
||||
}
|
||||
|
||||
p.versjon-old-warning {
|
||||
margin: 10px 0;
|
||||
padding: 5px 10px;
|
||||
border-radius: 4px;
|
||||
|
||||
letter-spacing: 1px;
|
||||
color: #fff;
|
||||
text-shadow: 0 0 2px #000;
|
||||
text-align: center;
|
||||
|
||||
background: #d40 repeating-linear-gradient(135deg,
|
||||
transparent,
|
||||
transparent 56px,
|
||||
rgba(255, 255, 255, 0.2) 56px,
|
||||
rgba(255, 255, 255, 0.2) 112px);
|
||||
}
|
||||
|
||||
p.versjon-old-warning a {
|
||||
color: #fff;
|
||||
border-color: #fff;
|
||||
}
|
||||
|
||||
p.versjon-dev-warning {
|
||||
margin: 10px 0;
|
||||
padding: 5px 10px;
|
||||
border-radius: 4px;
|
||||
|
||||
letter-spacing: 1px;
|
||||
color: #fff;
|
||||
text-shadow: 0 0 2px #000;
|
||||
text-align: center;
|
||||
|
||||
background: #E67300 repeating-linear-gradient(135deg,
|
||||
transparent,
|
||||
transparent 56px,
|
||||
rgba(255, 255, 255, 0.2) 56px,
|
||||
rgba(255, 255, 255, 0.2) 112px);
|
||||
}
|
||||
|
||||
p.versjon-dev-warning a {
|
||||
color: #fff;
|
||||
border-color: #fff;
|
||||
}
|
||||
|
||||
#branch-icon {
|
||||
width: 1em;
|
||||
height: 1em;
|
||||
background-size: contain;
|
||||
background-repeat: no-repeat;
|
||||
}
|
||||
35
exec/cmd.py
@@ -3,23 +3,17 @@ import os
|
||||
import pwd
|
||||
import subprocess
|
||||
|
||||
from subprocess import CompletedProcess # make it easy for users of this module
|
||||
from shlex import quote as shell_quote
|
||||
from typing import IO, Optional, Union
|
||||
|
||||
from typehelpers import TypeAlias
|
||||
from typing import Optional, Union, TypeAlias
|
||||
|
||||
ElevationMethod: TypeAlias = str
|
||||
|
||||
FileDescriptor: TypeAlias = Union[int, IO]
|
||||
|
||||
# as long as **only** sudo is supported, hardcode the default into ELEVATION_METHOD_DEFAULT.
|
||||
# when other methods are added, all mentions of ELEVATION_METHOD_DEFAULT should be replaced by a config key.
|
||||
|
||||
ELEVATION_METHOD_DEFAULT = "sudo"
|
||||
|
||||
ELEVATION_METHODS: dict[ElevationMethod, list[str]] = {
|
||||
"none": [],
|
||||
"sudo": ['sudo', '--'],
|
||||
}
|
||||
|
||||
@@ -40,8 +34,6 @@ def flatten_shell_script(script: Union[list[str], str], shell_quote_items: bool
|
||||
cmds = script
|
||||
if shell_quote_items:
|
||||
cmds = [shell_quote(i) for i in cmds]
|
||||
else:
|
||||
cmds = [(i if i != '' else '""') for i in cmds]
|
||||
script = " ".join(cmds)
|
||||
if wrap_in_shell_quote:
|
||||
script = shell_quote(script)
|
||||
@@ -55,18 +47,15 @@ def wrap_in_bash(cmd: Union[list[str], str], flatten_result=True) -> Union[str,
|
||||
return res
|
||||
|
||||
|
||||
def generate_cmd_elevated(cmd: Union[list[str], str], elevation_method: ElevationMethod):
|
||||
def generate_cmd_elevated(cmd: list[str], elevation_method: ElevationMethod):
|
||||
"wraps `cmd` in the necessary commands to escalate, e.g. `['sudo', '--', cmd]`."
|
||||
if isinstance(cmd, str):
|
||||
cmd = wrap_in_bash(cmd, flatten_result=False)
|
||||
assert not isinstance(cmd, str)  # typehints cmd as list[str]
|
||||
if elevation_method not in ELEVATION_METHODS:
|
||||
raise Exception(f"Unknown elevation method {elevation_method}")
|
||||
return ELEVATION_METHODS[elevation_method] + cmd
|
||||
|
||||
|
||||
def generate_cmd_su(
|
||||
cmd: Union[list[str], str],
|
||||
cmd: list[str],
|
||||
switch_user: str,
|
||||
elevation_method: Optional[ElevationMethod] = None,
|
||||
force_su: bool = False,
|
||||
@@ -95,9 +84,9 @@ def run_cmd(
|
||||
cwd: Optional[str] = None,
|
||||
switch_user: Optional[str] = None,
|
||||
elevation_method: Optional[ElevationMethod] = None,
|
||||
stdout: Optional[FileDescriptor] = None,
|
||||
stderr: Optional[FileDescriptor] = None,
|
||||
) -> Union[CompletedProcess, int]:
|
||||
stdout: Optional[int] = None,
|
||||
stderr=None,
|
||||
) -> Union[subprocess.CompletedProcess, int]:
|
||||
"execute `script` as `switch_user`, elevating and su'ing as necessary"
|
||||
kwargs: dict = {}
|
||||
env_cmd = []
|
||||
@@ -105,12 +94,10 @@ def run_cmd(
|
||||
env_cmd = generate_env_cmd(env)
|
||||
kwargs['env'] = env
|
||||
if not attach_tty:
|
||||
if (stdout, stderr) == (None, None):
|
||||
kwargs['capture_output'] = capture_output
|
||||
else:
|
||||
for name, fd in {'stdout': stdout, 'stderr': stderr}.items():
|
||||
if fd is not None:
|
||||
kwargs[name] = fd
|
||||
kwargs |= {'stdout': stdout} if stdout else {'capture_output': capture_output}
|
||||
if stderr:
|
||||
kwargs['stderr'] = stderr
|
||||
|
||||
script = flatten_shell_script(script)
|
||||
if cwd:
|
||||
kwargs['cwd'] = cwd
|
||||
@@ -118,7 +105,7 @@ def run_cmd(
|
||||
cmd = env_cmd + wrapped_script
|
||||
if switch_user:
|
||||
cmd = generate_cmd_su(cmd, switch_user, elevation_method=elevation_method)
|
||||
logging.debug(f'Running cmd: "{cmd}"' + (f' (path: {repr(cwd)})' if cwd else ''))
|
||||
logging.debug(f'Running cmd: "{cmd}"')
|
||||
if attach_tty:
|
||||
return subprocess.call(cmd, **kwargs)
|
||||
else:
|
||||
|
||||
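Editorial aside on the exec/cmd.py changes above (a sketch based only on the signatures
visible in this diff, not a verbatim excerpt): command elevation stays a simple list
prefix, and `run_cmd` wraps the script and optionally switches user.

```python
# Sketch only; assumes the signatures shown in the hunks above.
from exec.cmd import generate_cmd_elevated, run_cmd

# ELEVATION_METHODS maps "sudo" to ['sudo', '--'], so this returns
# ['sudo', '--', 'systemctl', 'restart', 'sshd']
print(generate_cmd_elevated(['systemctl', 'restart', 'sshd'], 'sudo'))

# run_cmd flattens the script, wraps it in bash, su's to the target user if needed,
# and returns a CompletedProcess (or an exit code when attach_tty is set)
result = run_cmd('whoami', switch_user='root', capture_output=True)
```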
35
exec/file.py
@@ -8,7 +8,7 @@ from shutil import rmtree
|
||||
from tempfile import mkdtemp
|
||||
from typing import Optional, Union
|
||||
|
||||
from .cmd import run_cmd, run_root_cmd, elevation_noop, generate_cmd_su, wrap_in_bash, shell_quote
|
||||
from .cmd import run_root_cmd, elevation_noop, generate_cmd_su, wrap_in_bash, shell_quote
|
||||
from utils import get_user_name, get_group_name
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ def chown(path: str, user: Optional[Union[str, int]] = None, group: Optional[Uni
|
||||
raise Exception(f"Failed to change owner of '{path}' to '{owner}'")
|
||||
|
||||
|
||||
def chmod(path, mode: Union[int, str] = 0o0755, force_sticky=True, privileged: bool = True):
|
||||
def chmod(path, mode: Union[int, str] = 0o0755, force_sticky=True):
|
||||
if not isinstance(mode, str):
|
||||
octal = oct(mode)[2:]
|
||||
else:
|
||||
@@ -54,7 +54,7 @@ def chmod(path, mode: Union[int, str] = 0o0755, force_sticky=True, privileged: b
|
||||
os.chmod(path, mode=octal) # type: ignore
|
||||
except:
|
||||
cmd = ["chmod", octal, path]
|
||||
result = run_cmd(cmd, switch_user='root' if privileged else None)
|
||||
result = run_root_cmd(cmd)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode:
|
||||
raise Exception(f"Failed to set mode of '{path}' to '{chmod}'")
|
||||
@@ -82,12 +82,8 @@ def write_file(
|
||||
fstat: os.stat_result
|
||||
exists = root_check_exists(path)
|
||||
dirname = os.path.dirname(path)
|
||||
failed = False
|
||||
if exists:
|
||||
try:
|
||||
fstat = os.stat(path)
|
||||
except PermissionError:
|
||||
failed = True
|
||||
fstat = os.stat(path)
|
||||
else:
|
||||
chown_user = chown_user or get_user_name(os.getuid())
|
||||
chown_group = chown_group or get_group_name(os.getgid())
|
||||
@@ -98,10 +94,9 @@ def write_file(
|
||||
if mode:
|
||||
if not mode.isnumeric():
|
||||
raise Exception(f"Unknown file mode '{mode}' (must be numeric): {path}")
|
||||
if not exists or failed or stat.filemode(int(mode, 8)) != stat.filemode(fstat.st_mode):
|
||||
if not exists or stat.filemode(int(mode, 8)) != stat.filemode(fstat.st_mode):
|
||||
chmod_mode = mode
|
||||
if not failed:
|
||||
failed = try_native_filewrite(path, content, chmod_mode) is not None
|
||||
failed = try_native_filewrite(path, content, chmod_mode)
|
||||
if exists or failed:
|
||||
if failed:
|
||||
try:
|
||||
@@ -144,13 +139,7 @@ def remove_file(path: str, recursive=False):
|
||||
raise Exception(f"Unable to remove {path}: cmd returned {rc}")
|
||||
|
||||
|
||||
def makedir(
|
||||
path,
|
||||
user: Optional[Union[str, int]] = None,
|
||||
group: Optional[Union[str, int]] = None,
|
||||
parents: bool = True,
|
||||
mode: Optional[Union[int, str]] = None,
|
||||
):
|
||||
def makedir(path, user: Optional[str] = None, group: Optional[str] = None, parents: bool = True):
|
||||
if not root_check_exists(path):
|
||||
try:
|
||||
if parents:
|
||||
@@ -159,8 +148,6 @@ def makedir(
|
||||
os.mkdir(path)
|
||||
except:
|
||||
run_root_cmd(['mkdir'] + (['-p'] if parents else []) + [path])
|
||||
if mode is not None:
|
||||
chmod(path, mode=mode)
|
||||
chown(path, user, group)
|
||||
|
||||
|
||||
@@ -169,20 +156,16 @@ def root_makedir(path, parents: bool = True):
|
||||
|
||||
|
||||
def symlink(source, target):
|
||||
"Create a symlink at `target`, pointing at `source`"
|
||||
try:
|
||||
os.symlink(source, target)
|
||||
except:
|
||||
result = run_root_cmd(['ln', '-s', source, target])
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode:
|
||||
raise Exception(f'Symlink creation of {target} pointing at {source} failed')
|
||||
run_root_cmd(['ln', '-s', source, target])
|
||||
|
||||
|
||||
def get_temp_dir(register_cleanup=True, mode: int = 0o0755):
|
||||
"create a new tempdir and sanitize ownership so root can access user files as god intended"
|
||||
t = mkdtemp()
|
||||
chmod(t, mode, privileged=False)
|
||||
chmod(t, mode)
|
||||
if register_cleanup:
|
||||
atexit.register(remove_file, t, recursive=True)
|
||||
return t
|
||||
|
||||
@@ -3,8 +3,6 @@ import os
|
||||
import pwd
|
||||
import subprocess
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from .cmd import run_cmd, run_root_cmd, generate_cmd_su
|
||||
|
||||
|
||||
@@ -12,7 +10,7 @@ def get_username(id: int):
|
||||
return pwd.getpwuid(id).pw_name
|
||||
|
||||
|
||||
def run_func(f, expected_user: Optional[str] = None, **kwargs):
|
||||
def run_func(f, expected_user: str = None, **kwargs):
|
||||
current_uid = os.getuid()
|
||||
current_username = get_username(current_uid)
|
||||
target_uid = current_uid
|
||||
|
||||
39
fastboot.py
Normal file
@@ -0,0 +1,39 @@
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
|
||||
def fastboot_erase_dtbo():
|
||||
logging.info("Fastboot: Erasing DTBO")
|
||||
subprocess.run(
|
||||
[
|
||||
'fastboot',
|
||||
'erase',
|
||||
'dtbo',
|
||||
],
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
|
||||
def fastboot_flash(partition, file):
|
||||
logging.info(f"Fastboot: Flashing {file} to {partition}")
|
||||
result = subprocess.run([
|
||||
'fastboot',
|
||||
'flash',
|
||||
partition,
|
||||
file,
|
||||
])
|
||||
if result.returncode != 0:
|
||||
logging.info(f'Failed to flash {file}')
|
||||
exit(1)
|
||||
|
||||
|
||||
def fastboot_boot(file):
|
||||
logging.info(f"Fastboot: booting {file}")
|
||||
result = subprocess.run([
|
||||
'fastboot',
|
||||
'boot',
|
||||
file,
|
||||
])
|
||||
if result.returncode != 0:
|
||||
logging.fatal(f'Failed to boot {file} using fastboot')
|
||||
exit(1)
|
||||
86
flash.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import shutil
|
||||
import os
|
||||
import click
|
||||
|
||||
from constants import FLASH_PARTS, LOCATIONS
|
||||
from exec.cmd import run_root_cmd
|
||||
from exec.file import get_temp_dir
|
||||
from fastboot import fastboot_flash
|
||||
from image import dd_image, partprobe, shrink_fs, losetup_rootfs_image, losetup_destroy, dump_aboot, dump_lk2nd, dump_qhypstub, get_device_and_flavour, get_image_name, get_image_path
|
||||
from wrapper import enforce_wrap
|
||||
|
||||
ABOOT = FLASH_PARTS['ABOOT']
|
||||
LK2ND = FLASH_PARTS['LK2ND']
|
||||
QHYPSTUB = FLASH_PARTS['QHYPSTUB']
|
||||
ROOTFS = FLASH_PARTS['ROOTFS']
|
||||
|
||||
|
||||
@click.command(name='flash')
|
||||
@click.argument('what', type=click.Choice(list(FLASH_PARTS.values())))
|
||||
@click.argument('location', type=str, required=False)
|
||||
def cmd_flash(what: str, location: str):
|
||||
"""Flash a partition onto a device. `location` takes either a path to a block device or one of emmc, sdcard"""
|
||||
enforce_wrap()
|
||||
device, flavour = get_device_and_flavour()
|
||||
device_image_name = get_image_name(device, flavour)
|
||||
device_image_path = get_image_path(device, flavour)
|
||||
|
||||
# TODO: PARSE DEVICE SECTOR SIZE
|
||||
sector_size = 4096
|
||||
|
||||
if what not in FLASH_PARTS.values():
|
||||
raise Exception(f'Unknown what "{what}", must be one of {", ".join(FLASH_PARTS.values())}')
|
||||
|
||||
if what == ROOTFS:
|
||||
if location is None:
|
||||
raise Exception(f'You need to specify a location to flash {what} to')
|
||||
|
||||
path = ''
|
||||
if location.startswith("/dev/"):
|
||||
path = location
|
||||
else:
|
||||
if location not in LOCATIONS:
|
||||
raise Exception(f'Invalid location {location}. Choose one of {", ".join(LOCATIONS)}')
|
||||
|
||||
dir = '/dev/disk/by-id'
|
||||
for file in os.listdir(dir):
|
||||
sanitized_file = file.replace('-', '').replace('_', '').lower()
|
||||
if f'jumpdrive{location.split("-")[0]}' in sanitized_file:
|
||||
path = os.path.realpath(os.path.join(dir, file))
|
||||
partprobe(path)
|
||||
result = run_root_cmd(['lsblk', path, '-o', 'SIZE'], capture_output=True)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to lsblk {path}')
|
||||
if result.stdout == b'SIZE\n 0B\n':
|
||||
raise Exception(f'Disk {path} has a size of 0B. That probably means it is not available (e.g. no'
|
||||
'microSD inserted or no microSD card slot installed in the device) or corrupt or defective')
|
||||
if path == '':
|
||||
raise Exception('Unable to discover Jumpdrive')
|
||||
|
||||
minimal_image_dir = get_temp_dir(register_cleanup=True)
|
||||
minimal_image_path = os.path.join(minimal_image_dir, f'minimal-{device_image_name}')
|
||||
|
||||
shutil.copyfile(device_image_path, minimal_image_path)
|
||||
|
||||
loop_device = losetup_rootfs_image(minimal_image_path, sector_size)
|
||||
partprobe(loop_device)
|
||||
shrink_fs(loop_device, minimal_image_path, sector_size)
|
||||
losetup_destroy(loop_device)
|
||||
|
||||
result = dd_image(input=minimal_image_path, output=path)
|
||||
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to flash {minimal_image_path} to {path}')
|
||||
else:
|
||||
loop_device = losetup_rootfs_image(device_image_path, sector_size)
|
||||
if what == ABOOT:
|
||||
path = dump_aboot(f'{loop_device}p1')
|
||||
fastboot_flash('boot', path)
|
||||
elif what == LK2ND:
|
||||
path = dump_lk2nd(f'{loop_device}p1')
|
||||
fastboot_flash('lk2nd', path)
|
||||
elif what == QHYPSTUB:
|
||||
path = dump_qhypstub(f'{loop_device}p1')
|
||||
fastboot_flash('qhypstub', path)
|
||||
else:
|
||||
raise Exception(f'Unknown what "{what}", this must be a bug in kupferbootstrap!')
|
||||
@@ -1,71 +0,0 @@
|
||||
import click
|
||||
import logging
|
||||
|
||||
from json import dumps as json_dump
|
||||
from typing import Optional
|
||||
|
||||
from config.cli import resolve_profile_field
|
||||
from config.state import config
|
||||
from utils import color_mark_selected, colors_supported
|
||||
|
||||
from .flavour import get_flavours, get_flavour
|
||||
|
||||
profile_option = click.option('-p', '--profile', help="name of the profile to use", required=False, default=None)
|
||||
|
||||
|
||||
@click.command(name='flavours')
|
||||
@click.option('-j', '--json', is_flag=True, help='output machine-parsable JSON format')
|
||||
@click.option('--output-file', type=click.Path(exists=False, file_okay=True), help="Dump JSON to file")
|
||||
def cmd_flavours(json: bool = False, output_file: Optional[str] = None):
|
||||
'list information about available flavours'
|
||||
results = []
|
||||
json_results = {}
|
||||
profile_flavour = None
|
||||
flavours = get_flavours()
|
||||
interactive_json = json and not output_file
|
||||
use_colors = colors_supported(config.runtime.colors) and not interactive_json
|
||||
profile_name = config.file.profiles.current
|
||||
selected, inherited_from = None, None
|
||||
if output_file:
|
||||
json = True
|
||||
if not flavours:
|
||||
raise Exception("No flavours found!")
|
||||
if not interactive_json:
|
||||
try:
|
||||
selected, inherited_from = resolve_profile_field(None, profile_name, 'flavour', config.file.profiles)
|
||||
if selected:
|
||||
profile_flavour = get_flavour(selected)
|
||||
except Exception as ex:
|
||||
logging.debug(f"Failed to get profile flavour for marking as currently selected, continuing anyway. Exception: {ex}")
|
||||
for name in sorted(flavours.keys()):
|
||||
f = flavours[name]
|
||||
try:
|
||||
f.parse_flavourinfo()
|
||||
except Exception as ex:
|
||||
logging.debug(f"A problem happened while parsing flavourinfo for {name}, continuing anyway. Exception: {ex}")
|
||||
if not interactive_json:
|
||||
snippet = f.nice_str(newlines=True, colors=use_colors)
|
||||
if profile_flavour == f:
|
||||
snippet = color_mark_selected(snippet, profile_name or '[unknown]', inherited_from)
|
||||
snippet += '\n'
|
||||
results += snippet.split('\n')
|
||||
if json:
|
||||
d = dict(f)
|
||||
d["description"] = f.flavour_info.description if (f.flavour_info and f.flavour_info.description) else f.description
|
||||
if "flavour_info" in d and d["flavour_info"]:
|
||||
for k in set(d["flavour_info"].keys()) - set(['description']):
|
||||
d[k] = d["flavour_info"][k]
|
||||
del d["flavour_info"]
|
||||
d["pkgbuild"] = f.pkgbuild.path if f.pkgbuild else None
|
||||
d["package"] = f.pkgbuild.name
|
||||
d["arches"] = sorted(f.pkgbuild.arches) if f.pkgbuild else None
|
||||
json_results[name] = d
|
||||
print()
|
||||
if output_file:
|
||||
with open(output_file, 'w') as fd:
|
||||
fd.write(json_dump(json_results))
|
||||
if interactive_json:
|
||||
print(json_dump(json_results, indent=4))
|
||||
else:
|
||||
for r in results:
|
||||
print(r)
|
||||
@@ -1,129 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from constants import FLAVOUR_DESCRIPTION_PREFIX, FLAVOUR_INFO_FILE
|
||||
from dictscheme import DictScheme
|
||||
from packages.pkgbuild import discover_pkgbuilds, get_pkgbuild_by_name, init_pkgbuilds, Pkgbuild
|
||||
from utils import color_str
|
||||
|
||||
|
||||
class FlavourInfo(DictScheme):
|
||||
rootfs_size: int # rootfs size in GB
|
||||
description: Optional[str]
|
||||
|
||||
def __repr__(self):
|
||||
return f'rootfs_size: {self.rootfs_size}'
|
||||
|
||||
|
||||
class Flavour(DictScheme):
|
||||
name: str
|
||||
pkgbuild: Pkgbuild
|
||||
description: str
|
||||
flavour_info: Optional[FlavourInfo]
|
||||
|
||||
@staticmethod
|
||||
def from_pkgbuild(pkgbuild: Pkgbuild) -> Flavour:
|
||||
name = pkgbuild.name
|
||||
if not name.startswith('flavour-'):
|
||||
raise Exception(f'Flavour package "{name}" doesn\'t start with "flavour-": "{name}"')
|
||||
if name.endswith('-common'):
|
||||
raise Exception(f'Flavour package "{name}" ends with "-common": "{name}"')
|
||||
name = name[8:] # split off 'flavour-'
|
||||
description = pkgbuild.description
|
||||
# cut off FLAVOUR_DESCRIPTION_PREFIX
|
||||
if description.lower().startswith(FLAVOUR_DESCRIPTION_PREFIX.lower()):
|
||||
description = description[len(FLAVOUR_DESCRIPTION_PREFIX):]
|
||||
return Flavour(name=name, pkgbuild=pkgbuild, description=description.strip(), flavour_info=None)
|
||||
|
||||
def __repr__(self):
|
||||
return f'Flavour<"{self.name}": "{self.description}", package: {self.pkgbuild.name if self.pkgbuild else "??? PROBABLY A BUG!"}{f", {self.flavour_info}" if self.flavour_info else ""}>'
|
||||
|
||||
def __str__(self):
|
||||
return self.nice_str()
|
||||
|
||||
def nice_str(self, newlines: bool = False, colors: bool = False) -> str:
|
||||
separator = '\n' if newlines else ', '
|
||||
|
||||
def get_lines(k, v, key_prefix=''):
|
||||
results = []
|
||||
full_k = f'{key_prefix}.{k}' if key_prefix else k
|
||||
if not isinstance(v, (dict, DictScheme)):
|
||||
results = [f'{color_str(full_k, bold=True)}: {v}']
|
||||
else:
|
||||
for _k, _v in v.items():
|
||||
if _k.startswith('_'):
|
||||
continue
|
||||
results += get_lines(_k, _v, key_prefix=full_k)
|
||||
return results
|
||||
|
||||
return separator.join(get_lines(None, self))
|
||||
|
||||
def parse_flavourinfo(self, lazy: bool = True):
|
||||
if lazy and self.flavour_info is not None:
|
||||
return self.flavour_info
|
||||
infopath = os.path.join(config.get_path('pkgbuilds'), self.pkgbuild.path, FLAVOUR_INFO_FILE)
|
||||
if not os.path.exists(infopath):
|
||||
raise Exception(f"Error parsing flavour info for flavour {self.name}: file doesn't exist: {infopath}")
|
||||
try:
|
||||
defaults = {'description': None}
|
||||
with open(infopath, 'r') as fd:
|
||||
infodict = json.load(fd)
|
||||
i = FlavourInfo(**(defaults | infodict))
|
||||
except Exception as ex:
|
||||
raise Exception(f"Error parsing {FLAVOUR_INFO_FILE} for flavour {self.name}: {ex}")
|
||||
self.flavour_info = i
|
||||
if i.description:
|
||||
self.description = i.description
|
||||
return i
|
||||
|
||||
|
||||
_flavours_discovered: bool = False
|
||||
_flavours_cache: dict[str, Flavour] = {}
|
||||
|
||||
|
||||
def get_flavours(lazy: bool = True):
|
||||
global _flavours_cache, _flavours_discovered
|
||||
if lazy and _flavours_discovered:
|
||||
return _flavours_cache
|
||||
logging.info("Searching PKGBUILDs for flavour packages")
|
||||
flavours: dict[str, Flavour] = {}
|
||||
pkgbuilds: dict[str, Pkgbuild] = discover_pkgbuilds(lazy=(lazy or not _flavours_discovered))
|
||||
for pkg in pkgbuilds.values():
|
||||
name = pkg.name
|
||||
if not name.startswith('flavour-') or name.endswith('-common'):
|
||||
continue
|
||||
name = name[8:] # split off 'flavour-'
|
||||
logging.info(f"Found flavour package {name}")
|
||||
flavours[name] = Flavour.from_pkgbuild(pkg)
|
||||
_flavours_cache.clear()
|
||||
_flavours_cache.update(flavours)
|
||||
_flavours_discovered = True
|
||||
return flavours
|
||||
|
||||
|
||||
def get_flavour(name: str, lazy: bool = True):
|
||||
global _flavours_cache
|
||||
pkg_name = f'flavour-{name}'
|
||||
if lazy and name in _flavours_cache:
|
||||
return _flavours_cache[name]
|
||||
try:
|
||||
logging.info(f"Trying to find PKGBUILD for flavour {name}")
|
||||
init_pkgbuilds()
|
||||
pkg = get_pkgbuild_by_name(pkg_name)
|
||||
except Exception as ex:
|
||||
raise Exception(f"Error parsing PKGBUILD for flavour package {pkg_name}:\n{ex}")
|
||||
assert pkg and pkg.name == pkg_name
|
||||
flavour = Flavour.from_pkgbuild(pkg)
|
||||
_flavours_cache[name] = flavour
|
||||
return flavour
|
||||
|
||||
|
||||
def get_profile_flavour(profile_name: Optional[str] = None) -> Flavour:
|
||||
profile = config.enforce_profile_flavour_set(profile_name=profile_name)
|
||||
return get_flavour(profile.flavour)
|
||||
@@ -1,29 +0,0 @@
|
||||
import pytest
|
||||
|
||||
from .flavour import Flavour, get_flavour, get_flavours
|
||||
|
||||
FLAVOUR_NAME = 'phosh'
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def flavour(name=FLAVOUR_NAME) -> Flavour:
|
||||
return get_flavour(name)
|
||||
|
||||
|
||||
def test_get_flavour(flavour: Flavour):
|
||||
assert isinstance(flavour, Flavour)
|
||||
assert flavour.name
|
||||
assert flavour.pkgbuild
|
||||
|
||||
|
||||
def test_parse_flavourinfo(flavour: Flavour):
|
||||
info = flavour.parse_flavourinfo()
|
||||
assert isinstance(info.rootfs_size, int)
|
||||
# rootfs_size should not be zero
|
||||
assert info.rootfs_size
|
||||
|
||||
|
||||
def test_get_flavours():
|
||||
flavours = get_flavours()
|
||||
assert flavours
|
||||
assert FLAVOUR_NAME in flavours
|
||||
@@ -2,10 +2,9 @@ import click
|
||||
import logging
|
||||
|
||||
from exec.cmd import run_root_cmd
|
||||
from ssh import run_ssh_command
|
||||
from wrapper import check_programs_wrap
|
||||
|
||||
from .ssh import run_ssh_command
|
||||
|
||||
|
||||
@click.command(name='forwarding')
|
||||
def cmd_forwarding():
|
||||
17
generator.py
@@ -1,16 +1,13 @@
|
||||
from typing import Optional
|
||||
|
||||
from constants import Arch, CFLAGS_ARCHES, CFLAGS_GENERAL, COMPILE_ARCHES, GCC_HOSTSPECS
|
||||
from config.state import config
|
||||
from constants import Arch, GCC_HOSTSPECS, CFLAGS_GENERAL, CFLAGS_ARCHES, COMPILE_ARCHES, CHROOT_PATHS
|
||||
from config import config
|
||||
|
||||
|
||||
def generate_makepkg_conf(arch: Arch, cross: bool = False, chroot: Optional[str] = None) -> str:
|
||||
def generate_makepkg_conf(arch: Arch, cross: bool = False, chroot: str = None) -> str:
|
||||
"""
|
||||
Generate a makepkg.conf. For use with crosscompiling, specify `cross=True` and pass as `chroot`
|
||||
the relative path inside the native chroot where the foreign chroot will be mounted.
|
||||
"""
|
||||
assert config.runtime.arch
|
||||
hostspec = GCC_HOSTSPECS[config.runtime.arch if cross else arch][arch]
|
||||
hostspec = GCC_HOSTSPECS[config.runtime['arch'] if cross else arch][arch]
|
||||
cflags = CFLAGS_ARCHES[arch] + CFLAGS_GENERAL
|
||||
if cross and not chroot:
|
||||
raise Exception('Cross-compile makepkg conf requested but no chroot path given: "{chroot}"')
|
||||
@@ -198,7 +195,7 @@ def generate_pacman_conf_body(
|
||||
check_space: bool = True,
|
||||
in_chroot: bool = True,
|
||||
):
|
||||
pacman_cache = f"{config.get_path('pacman')}/{arch}" if not in_chroot else '/var/cache/pacman/pkg'
|
||||
pacman_cache = config.get_path('pacman') if not in_chroot else CHROOT_PATHS['pacman']
|
||||
return f'''
|
||||
#
|
||||
# /etc/pacman.conf
|
||||
@@ -213,7 +210,7 @@ def generate_pacman_conf_body(
|
||||
# If you wish to use different paths, uncomment and update the paths.
|
||||
#RootDir = /
|
||||
#DBPath = /var/lib/pacman/
|
||||
CacheDir = {pacman_cache}
|
||||
CacheDir = {pacman_cache}/{arch}
|
||||
#LogFile = /var/log/pacman.log
|
||||
#GPGDir = /etc/pacman.d/gnupg/
|
||||
#HookDir = /etc/pacman.d/hooks/
|
||||
@@ -236,7 +233,7 @@ Color
|
||||
#NoProgressBar
|
||||
{'' if check_space else '#'}CheckSpace
|
||||
VerbosePkgLists
|
||||
ParallelDownloads = {config.file.pacman.parallel_downloads}
|
||||
ParallelDownloads = {config.file['pacman']['parallel_downloads']}
|
||||
|
||||
# By default, pacman accepts packages signed by keys that its local keyring
|
||||
# trusts (see pacman-key and its man page), as well as unsigned packages.
|
||||
|
||||
@@ -7,19 +7,18 @@ import click
|
||||
import logging
|
||||
from signal import pause
|
||||
from subprocess import CompletedProcess
|
||||
from typing import Optional, Union
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config, Profile
|
||||
from chroot.device import DeviceChroot, get_device_chroot
|
||||
from constants import Arch, BASE_LOCAL_PACKAGES, BASE_PACKAGES, POST_INSTALL_CMDS
|
||||
from constants import Arch, BASE_PACKAGES, DEVICES, FLAVOURS
|
||||
from config import config, Profile
|
||||
from distro.distro import get_base_distro, get_kupfer_https
|
||||
from devices.device import Device, get_profile_device
|
||||
from exec.cmd import run_root_cmd, generate_cmd_su
|
||||
from exec.file import get_temp_dir, root_write_file, root_makedir, makedir
|
||||
from flavours.flavour import Flavour, get_profile_flavour
|
||||
from net.ssh import copy_ssh_keys
|
||||
from packages.build import build_enable_qemu_binfmt, build_packages, filter_pkgbuilds
|
||||
from wrapper import enforce_wrap
|
||||
from exec.file import root_write_file, root_makedir, makedir
|
||||
from packages import build_enable_qemu_binfmt, build_packages_by_paths
|
||||
from packages.device import get_profile_device
|
||||
from ssh import copy_ssh_keys
|
||||
from wrapper import wrap_if_foreign_arch
|
||||
|
||||
# image files need to be slightly smaller than partitions to fit
|
||||
IMG_FILE_ROOT_DEFAULT_SIZE = "1800M"
|
||||
@@ -44,39 +43,10 @@ def partprobe(device: str):
|
||||
return run_root_cmd(['partprobe', device])
|
||||
|
||||
|
||||
def bytes_to_sectors(b: int, sector_size: int, round_up: bool = True):
|
||||
sectors, rest = divmod(b, sector_size)
|
||||
if rest and round_up:
|
||||
sectors += 1
|
||||
return sectors
|
||||
|
||||
|
||||
def get_fs_size(partition: str) -> tuple[int, int]:
|
||||
blocks_cmd = run_root_cmd(['dumpe2fs', '-h', partition], env={"LC_ALL": "C"}, capture_output=True)
|
||||
if blocks_cmd.returncode != 0:
|
||||
logging.debug(f"dumpe2fs stdout:\n: {blocks_cmd.stdout}")
|
||||
logging.debug(f"dumpe2fs stderr:\n {blocks_cmd.stderr}")
|
||||
raise Exception(f'Failed to detect new filesystem size of {partition}')
|
||||
blocks_text = blocks_cmd.stdout.decode('utf-8') if blocks_cmd.stdout else ''
|
||||
try:
|
||||
fs_blocks = int(re.search('\\nBlock count:[ ]+([0-9]+)\\n', blocks_text, flags=re.MULTILINE).group(1)) # type: ignore[union-attr]
|
||||
fs_block_size = int(re.search('\\nBlock size:[ ]+([0-9]+)\\n', blocks_text).group(1)) # type: ignore[union-attr]
|
||||
except Exception as ex:
|
||||
logging.debug(f"dumpe2fs stdout:\n {blocks_text}")
|
||||
logging.debug(f"dumpe2fs stderr:\n: {blocks_cmd.stderr}")
|
||||
logging.info("Failed to scrape block size and count from dumpe2fs:", ex)
|
||||
raise ex
|
||||
return fs_blocks, fs_block_size
|
||||
|
||||
|
||||
def align_bytes(size_bytes: int, alignment: int = 4096) -> int:
|
||||
rest = size_bytes % alignment
|
||||
if rest:
|
||||
size_bytes += alignment - rest
|
||||
return size_bytes
|
||||
|
||||
|
||||
def shrink_fs(loop_device: str, file: str, sector_size: int):
|
||||
# 8: 512 bytes sectors
|
||||
# 1: 4096 bytes sectors
|
||||
sectors_blocks_factor = 4096 // sector_size
|
||||
partprobe(loop_device)
|
||||
logging.debug(f"Checking filesystem at {loop_device}p2")
|
||||
result = run_root_cmd(['e2fsck', '-fy', f'{loop_device}p2'])
|
||||
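Editorial aside on the sector math introduced in the hunk above: `bytes_to_sectors` rounds
a byte count up to whole sectors and `align_bytes` pads a size up to the next alignment
boundary. A quick worked example (definitions copied from the diff):

```python
# Worked example for the helpers added above; the defs mirror the diff.
def bytes_to_sectors(b: int, sector_size: int, round_up: bool = True) -> int:
    sectors, rest = divmod(b, sector_size)
    if rest and round_up:
        sectors += 1  # a partial sector still occupies a whole sector on disk
    return sectors

def align_bytes(size_bytes: int, alignment: int = 4096) -> int:
    rest = size_bytes % alignment
    if rest:
        size_bytes += alignment - rest  # pad up to the next boundary
    return size_bytes

assert bytes_to_sectors(4096, 4096) == 1
assert bytes_to_sectors(4097, 4096) == 2   # partial sector rounds up
assert align_bytes(4097) == 8192           # padded to the next 4096-byte boundary
```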
@@ -84,16 +54,18 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
|
||||
# https://man7.org/linux/man-pages/man8/e2fsck.8.html#EXIT_CODE
|
||||
raise Exception(f'Failed to e2fsck {loop_device}p2 with exit code {result.returncode}')
|
||||
|
||||
logging.info(f'Shrinking filesystem at {loop_device}p2')
|
||||
result = run_root_cmd(['resize2fs', '-M', f'{loop_device}p2'])
|
||||
logging.debug(f'Shrinking filesystem at {loop_device}p2')
|
||||
result = run_root_cmd(['resize2fs', '-M', f'{loop_device}p2'], capture_output=True)
|
||||
if result.returncode != 0:
|
||||
print(result.stdout)
|
||||
print(result.stderr)
|
||||
raise Exception(f'Failed to resize2fs {loop_device}p2')
|
||||
|
||||
logging.debug(f'Reading size of shrunken filesystem on {loop_device}p2')
|
||||
fs_blocks, fs_block_size = get_fs_size(f'{loop_device}p2')
|
||||
sectors = bytes_to_sectors(fs_blocks * fs_block_size, sector_size)
|
||||
logging.debug(f'Finding end block of shrunken filesystem on {loop_device}p2')
|
||||
blocks = int(re.search('is now [0-9]+', result.stdout.decode('utf-8')).group(0).split(' ')[2]) # type: ignore
|
||||
sectors = blocks * sectors_blocks_factor #+ 157812 - 25600
|
||||
|
||||
logging.info(f'Shrinking partition at {loop_device}p2 to {sectors} sectors ({sectors * sector_size} bytes)')
|
||||
logging.debug(f'Shrinking partition at {loop_device}p2 to {sectors} sectors')
|
||||
child_proccess = subprocess.Popen(
|
||||
generate_cmd_su(['fdisk', '-b', str(sector_size), loop_device], switch_user='root'), # type: ignore
|
||||
stdin=subprocess.PIPE,
|
||||
@@ -119,7 +91,7 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
|
||||
if returncode > 1:
|
||||
raise Exception(f'Failed to shrink partition size of {loop_device}p2 with fdisk')
|
||||
|
||||
partprobe(loop_device).check_returncode()
|
||||
partprobe(loop_device)
|
||||
|
||||
logging.debug(f'Finding end sector of partition at {loop_device}p2')
|
||||
result = run_root_cmd(['fdisk', '-b', str(sector_size), '-l', loop_device], capture_output=True)
|
||||
@@ -137,7 +109,7 @@ def shrink_fs(loop_device: str, file: str, sector_size: int):
|
||||
if end_sector == 0:
|
||||
raise Exception(f'Failed to find end sector of {loop_device}p2')
|
||||
|
||||
end_size = align_bytes((end_sector + 1) * sector_size, 4096)
|
||||
end_size = (end_sector + 1) * sector_size
|
||||
|
||||
logging.debug(f'({end_sector} + 1) sectors * {sector_size} bytes/sector = {end_size} bytes')
|
||||
logging.info(f'Truncating {file} to {end_size} bytes')
|
||||
@@ -159,21 +131,23 @@ def losetup_destroy(loop_device):
|
||||
)
|
||||
|
||||
|
||||
def get_device_name(device: Union[str, Device]) -> str:
|
||||
return device.name if isinstance(device, Device) else device
|
||||
def get_device_and_flavour(profile_name: Optional[str] = None) -> tuple[str, str]:
|
||||
config.enforce_config_loaded()
|
||||
profile = config.get_profile(profile_name)
|
||||
if not profile['device']:
|
||||
raise Exception("Please set the device using 'kupferbootstrap config init ...'")
|
||||
|
||||
if not profile['flavour']:
|
||||
raise Exception("Please set the flavour using 'kupferbootstrap config init ...'")
|
||||
|
||||
return (profile['device'], profile['flavour'])
|
||||
|
||||
|
||||
def get_flavour_name(flavour: Union[str, Flavour]) -> str:
|
||||
if isinstance(flavour, Flavour):
|
||||
return flavour.name
|
||||
return flavour
|
||||
def get_image_name(device, flavour, img_type='full') -> str:
|
||||
return f'{device}-{flavour}-{img_type}.img'
|
||||
|
||||
|
||||
def get_image_name(device: Union[str, Device], flavour: Union[str, Flavour], img_type='full') -> str:
|
||||
return f'{get_device_name(device)}-{get_flavour_name(flavour)}-{img_type}.img'
|
||||
|
||||
|
||||
def get_image_path(device: Union[str, Device], flavour: Union[str, Flavour], img_type='full') -> str:
|
||||
def get_image_path(device, flavour, img_type='full') -> str:
|
||||
return os.path.join(config.get_path('images'), get_image_name(device, flavour, img_type))
|
||||
|
||||
|
||||
@@ -226,32 +200,46 @@ def mount_chroot(rootfs_source: str, boot_src: str, chroot: DeviceChroot):
|
||||
chroot.mount(boot_src, '/boot', options=['defaults'])
|
||||
|
||||
|
||||
def dump_file_from_image(image_path: str, file_path: str, target_path: Optional[str] = None):
|
||||
target_path = target_path or os.path.join(get_temp_dir(), os.path.basename(file_path))
|
||||
result = run_root_cmd([
|
||||
def dump_aboot(image_path: str) -> str:
|
||||
path = '/tmp/aboot.img'
|
||||
result = subprocess.run([
|
||||
'debugfs',
|
||||
image_path,
|
||||
'-R',
|
||||
f'\'dump /{file_path.lstrip("/")} {target_path}\'',
|
||||
f'dump /aboot.img {path}',
|
||||
])
|
||||
if result.returncode != 0 or not os.path.exists(target_path):
|
||||
raise Exception(f'Failed to dump {file_path} from /boot')
|
||||
return target_path
|
||||
|
||||
|
||||
def dump_aboot(image_path: str) -> str:
|
||||
return dump_file_from_image(image_path, file_path='/aboot.img')
|
||||
if result.returncode != 0:
|
||||
raise Exception('Failed to dump aboot.img')
|
||||
return path
|
||||
|
||||
|
||||
def dump_lk2nd(image_path: str) -> str:
|
||||
"""
|
||||
This doesn't append the image with the appended DTB which is needed for some devices, so it should get added in the future.
|
||||
"""
|
||||
return dump_file_from_image(image_path, file_path='/lk2nd.img')
|
||||
path = '/tmp/lk2nd.img'
|
||||
result = subprocess.run([
|
||||
'debugfs',
|
||||
image_path,
|
||||
'-R',
|
||||
f'dump /lk2nd.img {path}',
|
||||
])
|
||||
if result.returncode != 0:
|
||||
raise Exception('Failed to dump lk2nd.img')
|
||||
return path
|
||||
|
||||
|
||||
def dump_qhypstub(image_path: str) -> str:
|
||||
return dump_file_from_image(image_path, file_path='/qhyptstub.img')
|
||||
path = '/tmp/qhypstub.bin'
|
||||
result = subprocess.run([
|
||||
'debugfs',
|
||||
image_path,
|
||||
'-R',
|
||||
f'dump /qhypstub.bin {path}',
|
||||
])
|
||||
if result.returncode != 0:
|
||||
raise Exception('Failed to dump qhypstub.bin')
|
||||
return path
|
||||
|
||||
|
||||
def create_img_file(image_path: str, size_str: str):
|
||||
@@ -281,46 +269,46 @@ def partition_device(device: str):
|
||||
raise Exception(f'Failed to create partitions on {device}')
|
||||
|
||||
|
||||
def create_filesystem(device: str, blocksize: Optional[int], label=None, options=[], fstype='ext4'):
|
||||
"""Creates a new filesystem. Blocksize defaults"""
|
||||
def create_filesystem(device: str, blocksize: int = 4096, label=None, options=[], fstype='ext4'):
|
||||
# blocksize can be 4k max due to pagesize
|
||||
blocksize = min(blocksize, 4096)
|
||||
if fstype.startswith('ext'):
|
||||
# blocksize for ext-fs must be >=1024
|
||||
blocksize = max(blocksize, 1024)
|
||||
|
||||
labels = ['-L', label] if label else []
|
||||
cmd = [f'mkfs.{fstype}', '-F', *labels]
|
||||
if blocksize:
|
||||
# blocksize can be 4k max due to pagesize
|
||||
blocksize = min(blocksize, 4096)
|
||||
if fstype.startswith('ext'):
|
||||
# blocksize for ext-fs must be >=1024
|
||||
blocksize = max(blocksize, 1024)
|
||||
cmd += [
|
||||
'-b',
|
||||
str(blocksize),
|
||||
]
|
||||
cmd.append(device)
|
||||
cmd = [
|
||||
f'mkfs.{fstype}',
|
||||
'-F',
|
||||
'-b',
|
||||
str(blocksize),
|
||||
] + labels + [device]
|
||||
result = run_root_cmd(cmd)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to create {fstype} filesystem on {device} with CMD: {cmd}')
|
||||
|
||||
|
||||
def create_root_fs(device: str, blocksize: Optional[int]):
|
||||
def create_root_fs(device: str, blocksize: int):
|
||||
create_filesystem(device, blocksize=blocksize, label='kupfer_root', options=['-O', '^metadata_csum', '-N', '100000'])
|
||||
|
||||
|
||||
def create_boot_fs(device: str, blocksize: Optional[int]):
|
||||
def create_boot_fs(device: str, blocksize: int):
|
||||
create_filesystem(device, blocksize=blocksize, label='kupfer_boot', fstype='ext2')
|
||||
|
||||
|
||||
def install_rootfs(
|
||||
rootfs_device: str,
|
||||
bootfs_device: str,
|
||||
device: Union[str, Device],
|
||||
flavour: Flavour,
|
||||
device: str,
|
||||
flavour: str,
|
||||
arch: Arch,
|
||||
packages: list[str],
|
||||
use_local_repos: bool,
|
||||
profile: Profile,
|
||||
):
|
||||
user = profile['username'] or 'kupfer'
|
||||
chroot = get_device_chroot(device=get_device_name(device), flavour=flavour.name, arch=arch, packages=packages, use_local_repos=use_local_repos)
|
||||
post_cmds = FLAVOURS[flavour].get('post_cmds', [])
|
||||
chroot = get_device_chroot(device=device, flavour=flavour, arch=arch, packages=packages, use_local_repos=use_local_repos)
|
||||
|
||||
mount_chroot(rootfs_device, bootfs_device, chroot)
|
||||
|
||||
@@ -331,11 +319,9 @@ def install_rootfs(
|
||||
user=user,
|
||||
password=profile['password'],
|
||||
)
|
||||
chroot.add_sudo_config(config_name='wheel', privilegee='%wheel', password_required=True)
|
||||
copy_ssh_keys(
|
||||
chroot,
|
||||
chroot.path,
|
||||
user=user,
|
||||
allow_fail=True,
|
||||
)
|
||||
files = {
|
||||
'etc/pacman.conf': get_base_distro(arch).get_pacman_conf(
|
||||
@@ -343,17 +329,16 @@ def install_rootfs(
|
||||
extra_repos=get_kupfer_https(arch).repos,
|
||||
in_chroot=True,
|
||||
),
|
||||
'etc/hostname': profile['hostname'] or 'kupfer',
|
||||
'etc/sudoers.d/wheel': "# allow members of group wheel to execute any command\n%wheel ALL=(ALL:ALL) ALL\n",
|
||||
'etc/hostname': profile['hostname'],
|
||||
}
|
||||
for target, content in files.items():
|
||||
root_write_file(os.path.join(chroot.path, target.lstrip('/')), content)
|
||||
|
||||
logging.info("Running post-install CMDs")
|
||||
for cmd in POST_INSTALL_CMDS:
|
||||
result = chroot.run_cmd(cmd)
|
||||
if post_cmds:
|
||||
result = chroot.run_cmd(' && '.join(post_cmds))
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Error running post-install cmd: {cmd}')
|
||||
raise Exception('Error running post_cmds')
|
||||
|
||||
logging.info('Preparing to unmount chroot')
|
||||
res = chroot.run_cmd('sync && umount /boot', attach_tty=True)
|
||||
@@ -368,104 +353,67 @@ def install_rootfs(
|
||||
|
||||
@click.group(name='image')
|
||||
def cmd_image():
|
||||
"""Build, flash and boot device images"""
|
||||
|
||||
|
||||
sectorsize_option = click.option(
|
||||
'-b',
|
||||
'--sector-size',
|
||||
help="Override the device's sector size",
|
||||
type=int,
|
||||
default=None,
|
||||
)
|
||||
"""Build and manage device images"""
|
||||
|
||||
|
||||
@cmd_image.command(name='build')
|
||||
@click.argument('profile_name', required=False)
|
||||
@click.option(
|
||||
'--local-repos/--no-local-repos',
|
||||
'-l/-L',
|
||||
help='Whether to use local package repos at all or only use HTTPS repos.',
|
||||
default=True,
|
||||
show_default=True,
|
||||
is_flag=True,
|
||||
)
|
||||
@click.option(
|
||||
'--build-pkgs/--no-build-pkgs',
|
||||
'-p/-P',
|
||||
help='Whether to build missing/outdated local packages if local repos are enabled.',
|
||||
default=True,
|
||||
show_default=True,
|
||||
is_flag=True,
|
||||
)
|
||||
@click.option(
|
||||
'--no-download-pkgs',
|
||||
help='Disable trying to download packages instead of building if building is enabled.',
|
||||
default=False,
|
||||
is_flag=True,
|
||||
)
|
||||
@click.option(
|
||||
'--block-target',
|
||||
help='Override the block device file to write the final image to',
|
||||
type=click.Path(),
|
||||
default=None,
|
||||
)
|
||||
@click.option(
|
||||
'--skip-part-images',
|
||||
help='Skip creating image files for the partitions and directly work on the target block device.',
|
||||
default=False,
|
||||
is_flag=True,
|
||||
)
|
||||
@sectorsize_option
|
||||
def cmd_build(
|
||||
profile_name: Optional[str] = None,
|
||||
local_repos: bool = True,
|
||||
build_pkgs: bool = True,
|
||||
no_download_pkgs=False,
|
||||
block_target: Optional[str] = None,
|
||||
sector_size: Optional[int] = None,
|
||||
skip_part_images: bool = False,
|
||||
):
|
||||
@click.option('--local-repos/--no-local-repos',
|
||||
'-l/-L',
|
||||
default=True,
|
||||
show_default=True,
|
||||
help='Whether to use local package repos at all or only use HTTPS repos.')
|
||||
@click.option('--build-pkgs/--no-build-pkgs',
|
||||
'-p/-P',
|
||||
default=True,
|
||||
show_default=True,
|
||||
help='Whether to build missing/outdated local packages if local repos are enabled.')
|
||||
@click.option('--no-download-pkgs',
|
||||
is_flag=True,
|
||||
default=False,
|
||||
help='Disable trying to download packages instead of building if building is enabled.')
|
||||
@click.option('--block-target', type=click.Path(), default=None, help='Override the block device file to write the final image to')
|
||||
@click.option('--skip-part-images',
|
||||
is_flag=True,
|
||||
default=False,
|
||||
help='Skip creating image files for the partitions and directly work on the target block device.')
|
||||
def cmd_build(profile_name: str = None,
|
||||
local_repos: bool = True,
|
||||
build_pkgs: bool = True,
|
||||
no_download_pkgs=False,
|
||||
block_target: str = None,
|
||||
skip_part_images: bool = False):
|
||||
"""
|
||||
Build a device image.
|
||||
|
||||
Unless overriden, required packages will be built or preferably downloaded from HTTPS repos.
|
||||
"""
|
||||
|
||||
config.enforce_profile_device_set()
|
||||
config.enforce_profile_flavour_set()
|
||||
enforce_wrap()
|
||||
device = get_profile_device(profile_name)
|
||||
arch = device.arch
|
||||
# check_programs_wrap(['makepkg', 'pacman', 'pacstrap'])
|
||||
arch = get_profile_device(profile_name).arch
|
||||
wrap_if_foreign_arch(arch)
|
||||
profile: Profile = config.get_profile(profile_name)
|
||||
flavour = get_profile_flavour(profile_name)
|
||||
rootfs_size_mb = flavour.parse_flavourinfo().rootfs_size * 1000 + int(profile.size_extra_mb)
|
||||
device, flavour = get_device_and_flavour(profile_name)
|
||||
size_extra_mb: int = int(profile["size_extra_mb"])
|
||||
|
||||
packages = BASE_LOCAL_PACKAGES + [device.package.name, flavour.pkgbuild.name]
|
||||
packages_extra = BASE_PACKAGES + profile.pkgs_include
|
||||
sector_size = 4096
|
||||
rootfs_size_mb = FLAVOURS[flavour].get('size', 2) * 1000
|
||||
|
||||
if arch != config.runtime.arch:
|
||||
packages = BASE_PACKAGES + DEVICES[device] + FLAVOURS[flavour]['packages'] + profile['pkgs_include']
|
||||
|
||||
if arch != config.runtime['arch']:
|
||||
build_enable_qemu_binfmt(arch)
|
||||
|
||||
if local_repos and build_pkgs:
|
||||
logging.info("Making sure all packages are built")
|
||||
# enforce that local base packages are built
|
||||
pkgbuilds = set(filter_pkgbuilds(packages, arch=arch, allow_empty_results=False, use_paths=False))
|
||||
# extra packages might be a mix of package names that are in our PKGBUILDs and packages from the base distro
|
||||
pkgbuilds |= set(filter_pkgbuilds(packages_extra, arch=arch, allow_empty_results=True, use_paths=False))
|
||||
build_packages(pkgbuilds, arch, try_download=not no_download_pkgs)
|
||||
build_packages_by_paths(packages, arch, try_download=not no_download_pkgs)
|
||||
|
||||
sector_size = sector_size or device.get_image_sectorsize()
|
||||
|
||||
image_path = block_target or get_image_path(device, flavour.name)
|
||||
image_path = block_target or get_image_path(device, flavour)
|
||||
|
||||
makedir(os.path.dirname(image_path))
|
||||
|
||||
logging.info(f'Creating new file at {image_path}')
|
||||
create_img_file(image_path, f"{rootfs_size_mb}M")
|
||||
create_img_file(image_path, f"{rootfs_size_mb + size_extra_mb}M")
|
||||
|
||||
loop_device = losetup_rootfs_image(image_path, sector_size or device.get_image_sectorsize_default())
|
||||
loop_device = losetup_rootfs_image(image_path, sector_size)
|
||||
|
||||
partition_device(loop_device)
|
||||
partprobe(loop_device)
|
||||
@@ -480,7 +428,7 @@ def cmd_build(
|
||||
else:
|
||||
logging.info('Creating per-partition image files')
|
||||
boot_dev = create_img_file(get_image_path(device, flavour, 'boot'), IMG_FILE_BOOT_DEFAULT_SIZE)
|
||||
root_dev = create_img_file(get_image_path(device, flavour, 'root'), f'{rootfs_size_mb - 200}M')
|
||||
root_dev = create_img_file(get_image_path(device, flavour, 'root'), f'{rootfs_size_mb + size_extra_mb - 200}M')
|
||||
|
||||
create_boot_fs(boot_dev, sector_size)
|
||||
create_root_fs(root_dev, sector_size)
|
||||
@@ -491,7 +439,7 @@ def cmd_build(
|
||||
device,
|
||||
flavour,
|
||||
arch,
|
||||
list(set(packages) | set(packages_extra)),
|
||||
packages,
|
||||
local_repos,
|
||||
profile,
|
||||
)
|
||||
@@ -508,19 +456,15 @@ def cmd_build(
|
||||
|
||||
@cmd_image.command(name='inspect')
|
||||
@click.option('--shell', '-s', is_flag=True)
|
||||
@sectorsize_option
|
||||
@click.argument('profile', required=False)
|
||||
def cmd_inspect(profile: Optional[str] = None, shell: bool = False, sector_size: Optional[int] = None):
|
||||
"""Loop-mount the device image for inspection."""
|
||||
config.enforce_profile_device_set()
|
||||
config.enforce_profile_flavour_set()
|
||||
enforce_wrap()
|
||||
device = get_profile_device(profile)
|
||||
arch = device.arch
|
||||
flavour = get_profile_flavour(profile).name
|
||||
sector_size = sector_size or device.get_image_sectorsize_default()
|
||||
|
||||
chroot = get_device_chroot(device.name, flavour, arch)
|
||||
def cmd_inspect(profile: str = None, shell: bool = False):
|
||||
"""Open a shell in a device image"""
|
||||
arch = get_profile_device(profile).arch
|
||||
wrap_if_foreign_arch(arch)
|
||||
device, flavour = get_device_and_flavour(profile)
|
||||
# TODO: PARSE DEVICE SECTOR SIZE
|
||||
sector_size = 4096
|
||||
chroot = get_device_chroot(device, flavour, arch)
|
||||
image_path = get_image_path(device, flavour)
|
||||
loop_device = losetup_rootfs_image(image_path, sector_size)
|
||||
partprobe(loop_device)
|
||||
@@ -531,7 +475,7 @@ def cmd_inspect(profile: Optional[str] = None, shell: bool = False, sector_size:
|
||||
if shell:
|
||||
chroot.initialized = True
|
||||
chroot.activate()
|
||||
if arch != config.runtime.arch:
|
||||
if arch != config.runtime['arch']:
|
||||
logging.info('Installing requisites for foreign-arch shell')
|
||||
build_enable_qemu_binfmt(arch)
|
||||
logging.info('Starting inspection shell')
|
||||
@@ -1,75 +0,0 @@
|
||||
import os
|
||||
import urllib.request
|
||||
import click
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config
|
||||
from constants import FLASH_PARTS, FASTBOOT, JUMPDRIVE, JUMPDRIVE_VERSION
|
||||
from exec.file import makedir
|
||||
from devices.device import get_profile_device
|
||||
from flavours.flavour import get_profile_flavour
|
||||
from flavours.cli import profile_option
|
||||
from wrapper import enforce_wrap
|
||||
|
||||
from .fastboot import fastboot_boot, fastboot_erase
|
||||
from .image import get_device_name, losetup_rootfs_image, get_image_path, dump_aboot, dump_lk2nd
|
||||
|
||||
LK2ND = FLASH_PARTS['LK2ND']
|
||||
ABOOT = FLASH_PARTS['ABOOT']
|
||||
|
||||
BOOT_TYPES = [ABOOT, LK2ND, JUMPDRIVE]
|
||||
|
||||
|
||||
@click.command(name='boot')
|
||||
@profile_option
|
||||
@click.argument('type', required=False, default=ABOOT, type=click.Choice(BOOT_TYPES))
|
||||
@click.option('-b', '--sector-size', type=int, help="Override the device's sector size", default=None)
|
||||
@click.option(
|
||||
'--erase-dtbo/--no-erase-dtbo',
|
||||
is_flag=True,
|
||||
default=True,
|
||||
show_default=True,
|
||||
help="Erase the DTBO partition before flashing",
|
||||
)
|
||||
@click.option('--confirm', is_flag=True, help="Ask for confirmation before executing fastboot commands")
|
||||
def cmd_boot(
|
||||
type: str,
|
||||
profile: Optional[str] = None,
|
||||
sector_size: Optional[int] = None,
|
||||
erase_dtbo: bool = True,
|
||||
confirm: bool = False,
|
||||
):
|
||||
"""Boot JumpDrive or the Kupfer aboot image. Erases Android DTBO in the process."""
|
||||
enforce_wrap()
|
||||
device = get_profile_device(profile)
|
||||
flavour = get_profile_flavour(profile).name
|
||||
deviceinfo = device.parse_deviceinfo()
|
||||
sector_size = sector_size or device.get_image_sectorsize_default()
|
||||
if not sector_size:
|
||||
raise Exception(f"Device {device.name} has no rootfs_image_sector_size specified")
|
||||
image_path = get_image_path(device, flavour)
|
||||
strategy = deviceinfo.flash_method
|
||||
if not strategy:
|
||||
raise Exception(f"Device {device.name} has no flash strategy defined")
|
||||
|
||||
if strategy == FASTBOOT:
|
||||
if type == JUMPDRIVE:
|
||||
file = f'boot-{get_device_name(device)}.img'
|
||||
path = os.path.join(config.get_path('jumpdrive'), file)
|
||||
makedir(os.path.dirname(path))
|
||||
if not os.path.exists(path):
|
||||
urllib.request.urlretrieve(f'https://github.com/dreemurrs-embedded/Jumpdrive/releases/download/{JUMPDRIVE_VERSION}/{file}', path)
|
||||
else:
|
||||
loop_device = losetup_rootfs_image(image_path, sector_size)
|
||||
if type == LK2ND:
|
||||
path = dump_lk2nd(loop_device + 'p1')
|
||||
elif type == ABOOT:
|
||||
path = dump_aboot(loop_device + 'p1')
|
||||
else:
|
||||
raise Exception(f'Unknown boot image type {type}')
|
||||
if erase_dtbo:
|
||||
fastboot_erase('dtbo', confirm=confirm)
|
||||
fastboot_boot(path, confirm=confirm)
|
||||
else:
|
||||
raise Exception(f'Unsupported flash strategy "{strategy}" for device {device.name}')
|
||||
@@ -1,6 +0,0 @@
|
||||
from .boot import cmd_boot
|
||||
from .flash import cmd_flash
|
||||
from .image import cmd_image
|
||||
|
||||
for cmd in [cmd_boot, cmd_flash]:
|
||||
cmd_image.add_command(cmd)
|
||||
@@ -1,65 +0,0 @@
|
||||
import click
|
||||
import logging
|
||||
|
||||
from exec.cmd import run_cmd, CompletedProcess
|
||||
from typing import Optional
|
||||
|
||||
|
||||
def confirm_cmd(cmd: list[str], color='green', default=True, msg='Really execute fastboot cmd?') -> bool:
|
||||
return click.confirm(
|
||||
f'{click.style(msg, fg=color, bold=True)} {" ".join(cmd)}',
|
||||
default=default,
|
||||
abort=False,
|
||||
)
|
||||
|
||||
|
||||
def fastboot_erase(target: str, confirm: bool = False):
|
||||
if not target:
|
||||
raise Exception(f"No fastboot erase target specified: {repr(target)}")
|
||||
cmd = [
|
||||
'fastboot',
|
||||
'erase',
|
||||
target,
|
||||
]
|
||||
if confirm:
|
||||
if not confirm_cmd(cmd, msg=f'Really erase fastboot "{target}" partition?', color='yellow'):
|
||||
raise Exception("user aborted")
|
||||
logging.info(f"Fastboot: Erasing {target}")
|
||||
run_cmd(
|
||||
cmd,
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
|
||||
def fastboot_flash(partition: str, file: str, sparse_size: Optional[str] = None, confirm: bool = False):
|
||||
cmd = [
|
||||
'fastboot',
|
||||
*(['-S', sparse_size] if sparse_size is not None else []),
|
||||
'flash',
|
||||
partition,
|
||||
file,
|
||||
]
|
||||
if confirm:
|
||||
if not confirm_cmd(cmd):
|
||||
raise Exception("user aborted")
|
||||
logging.info(f"Fastboot: Flashing {file} to {partition}")
|
||||
result = run_cmd(cmd)
|
||||
assert isinstance(result, CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to flash {file}')
|
||||
|
||||
|
||||
def fastboot_boot(file, confirm: bool = False):
|
||||
cmd = [
|
||||
'fastboot',
|
||||
'boot',
|
||||
file,
|
||||
]
|
||||
if confirm:
|
||||
if not confirm_cmd(cmd):
|
||||
raise Exception("user aborted")
|
||||
logging.info(f"Fastboot: booting {file}")
|
||||
result = run_cmd(cmd)
|
||||
assert isinstance(result, CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to boot {file} using fastboot')
|
||||
150
image/flash.py
150
image/flash.py
@@ -1,150 +0,0 @@
|
||||
import shutil
|
||||
import os
|
||||
import click
|
||||
import logging
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from constants import FLASH_PARTS, LOCATIONS, FASTBOOT, JUMPDRIVE
|
||||
from exec.cmd import run_root_cmd
|
||||
from exec.file import get_temp_dir
|
||||
from devices.device import get_profile_device
|
||||
from flavours.flavour import get_profile_flavour
|
||||
from flavours.cli import profile_option
|
||||
from wrapper import enforce_wrap
|
||||
|
||||
from .fastboot import fastboot_flash
|
||||
from .image import dd_image, dump_aboot, dump_lk2nd, dump_qhypstub, get_image_path, losetup_destroy, losetup_rootfs_image, partprobe, shrink_fs
|
||||
|
||||
ABOOT = FLASH_PARTS['ABOOT']
|
||||
LK2ND = FLASH_PARTS['LK2ND']
|
||||
QHYPSTUB = FLASH_PARTS['QHYPSTUB']
|
||||
FULL_IMG = FLASH_PARTS['FULL']
|
||||
|
||||
DD = 'dd'
|
||||
|
||||
FLASH_METHODS = [FASTBOOT, JUMPDRIVE, DD]
|
||||
|
||||
|
||||
def find_jumpdrive(location: str) -> str:
|
||||
if location not in LOCATIONS:
|
||||
raise Exception(f'Invalid location {location}. Choose one of {", ".join(LOCATIONS)}')
|
||||
dir = '/dev/disk/by-id'
|
||||
for file in os.listdir(dir):
|
||||
sanitized_file = file.replace('-', '').replace('_', '').lower()
|
||||
if f'jumpdrive{location.split("-")[0]}' in sanitized_file:
|
||||
return os.path.realpath(os.path.join(dir, file))
|
||||
raise Exception('Unable to discover Jumpdrive')
|
||||
|
||||
|
||||
def test_blockdev(path: str):
|
||||
partprobe(path)
|
||||
result = run_root_cmd(['lsblk', path, '-o', 'SIZE'], capture_output=True)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to lsblk {path}')
|
||||
if result.stdout == b'SIZE\n 0B\n':
|
||||
raise Exception(f'Disk {path} has a size of 0B. That probably means it is not available (e.g. no'
|
||||
'microSD inserted or no microSD card slot installed in the device) or corrupt or defect')
|
||||
|
||||
|
||||
def prepare_minimal_image(source_path: str, sector_size: int) -> str:
|
||||
minimal_image_dir = get_temp_dir(register_cleanup=True)
|
||||
minimal_image_path = os.path.join(minimal_image_dir, f'minimal-{os.path.basename(source_path)}')
|
||||
logging.info(f"Copying image {os.path.basename(source_path)} to {minimal_image_dir} for shrinking")
|
||||
shutil.copyfile(source_path, minimal_image_path)
|
||||
|
||||
loop_device = losetup_rootfs_image(minimal_image_path, sector_size)
|
||||
partprobe(loop_device)
|
||||
shrink_fs(loop_device, minimal_image_path, sector_size)
|
||||
losetup_destroy(loop_device)
|
||||
return minimal_image_path
|
||||
|
||||
|
||||
@click.command(name='flash')
|
||||
@profile_option
|
||||
@click.option('-m', '--method', type=click.Choice(FLASH_METHODS))
|
||||
@click.option('--split-size', help='Chunk size when splitting the image into sparse files via fastboot')
|
||||
@click.option('--shrink/--no-shrink', is_flag=True, default=True, help="Copy and shrink the image file to minimal size")
|
||||
@click.option('-b', '--sector-size', type=int, help="Override the device's sector size", default=None)
|
||||
@click.option('--confirm', is_flag=True, help="Ask for confirmation before executing fastboot commands")
|
||||
@click.argument('what', type=click.Choice(list(FLASH_PARTS.values())))
|
||||
@click.argument('location', type=str, required=False)
|
||||
def cmd_flash(
|
||||
what: str,
|
||||
location: str,
|
||||
method: Optional[str] = None,
|
||||
split_size: Optional[str] = None,
|
||||
profile: Optional[str] = None,
|
||||
shrink: bool = True,
|
||||
sector_size: Optional[int] = None,
|
||||
confirm: bool = False,
|
||||
):
|
||||
"""
|
||||
Flash a partition onto a device.
|
||||
|
||||
The syntax of LOCATION depends on the flashing method and is usually only required for flashing "full":
|
||||
|
||||
\b
|
||||
- fastboot: the regular fastboot partition identifier. Usually "userdata"
|
||||
- dd: a path to a block device
|
||||
- jumpdrive: one of "emmc", "sdcard" or a path to a block device
|
||||
"""
|
||||
enforce_wrap()
|
||||
device = get_profile_device(profile)
|
||||
flavour = get_profile_flavour(profile).name
|
||||
device_image_path = get_image_path(device, flavour)
|
||||
|
||||
deviceinfo = device.parse_deviceinfo()
|
||||
sector_size = sector_size or device.get_image_sectorsize_default()
|
||||
method = method or deviceinfo.flash_method
|
||||
|
||||
if what not in FLASH_PARTS.values():
|
||||
raise Exception(f'Unknown what "{what}", must be one of {", ".join(FLASH_PARTS.values())}')
|
||||
|
||||
if location and location.startswith('aboot'):
|
||||
raise Exception("You're trying to flash something "
|
||||
f"to your aboot partition ({location!r}), "
|
||||
"which contains the android bootloader itself.\n"
|
||||
"This will brick your phone and is not what you want.\n"
|
||||
'Aborting.\nDid you mean to flash to "boot"?')
|
||||
|
||||
if what == FULL_IMG:
|
||||
path = ''
|
||||
if method not in FLASH_METHODS:
|
||||
raise Exception(f"Flash method {method} not supported!")
|
||||
if not location:
|
||||
raise Exception(f'You need to specify a location to flash {what} to')
|
||||
path = ''
|
||||
image_path = prepare_minimal_image(device_image_path, sector_size) if shrink else device_image_path
|
||||
if method == FASTBOOT:
|
||||
fastboot_flash(
|
||||
partition=location,
|
||||
file=image_path,
|
||||
sparse_size=split_size if split_size is not None else '100M',
|
||||
confirm=confirm,
|
||||
)
|
||||
elif method in [JUMPDRIVE, DD]:
|
||||
if method == DD or location.startswith("/") or (location not in LOCATIONS and os.path.exists(location)):
|
||||
path = location
|
||||
elif method == JUMPDRIVE:
|
||||
path = find_jumpdrive(location)
|
||||
test_blockdev(path)
|
||||
if dd_image(input=image_path, output=path).returncode != 0:
|
||||
raise Exception(f'Failed to flash {image_path} to {path}')
|
||||
else:
|
||||
raise Exception(f'Unhandled flash method "{method}" for "{what}"')
|
||||
else:
|
||||
if method and method != FASTBOOT:
|
||||
raise Exception(f'Flashing "{what}" with method "{method}" not supported, try no parameter or "{FASTBOOT}"')
|
||||
loop_device = losetup_rootfs_image(device_image_path, sector_size)
|
||||
if what == ABOOT:
|
||||
path = dump_aboot(f'{loop_device}p1')
|
||||
fastboot_flash(location or 'boot', path, confirm=confirm)
|
||||
elif what == LK2ND:
|
||||
path = dump_lk2nd(f'{loop_device}p1')
|
||||
fastboot_flash(location or 'lk2nd', path, confirm=confirm)
|
||||
elif what == QHYPSTUB:
|
||||
path = dump_qhypstub(f'{loop_device}p1')
|
||||
fastboot_flash(location or 'qhypstub', path, confirm=confirm)
|
||||
else:
|
||||
raise Exception(f'Unknown what "{what}", this must be a bug in kupferbootstrap!')
|
||||
@@ -1,100 +0,0 @@
|
||||
import click
|
||||
import os
|
||||
import pytest
|
||||
|
||||
from glob import glob
|
||||
from subprocess import CompletedProcess
|
||||
|
||||
from config.state import config, CONFIG_DEFAULTS
|
||||
from constants import SRCINFO_METADATA_FILE
|
||||
from exec.cmd import run_cmd
|
||||
from exec.file import get_temp_dir
|
||||
from logger import setup_logging
|
||||
from packages.cli import SRCINFO_CACHE_FILES, cmd_build, cmd_clean, cmd_init, cmd_update
|
||||
from utils import git_get_branch
|
||||
|
||||
tempdir = None
|
||||
config.try_load_file()
|
||||
setup_logging(True)
|
||||
|
||||
PKG_TEST_PATH = 'device/device-sdm845-oneplus-enchilada'
|
||||
PKG_TEST_NAME = 'device-sdm845-xiaomi-beryllium-ebbg'
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def ctx() -> click.Context:
|
||||
global tempdir
|
||||
if not tempdir:
|
||||
tempdir = get_temp_dir()
|
||||
if not os.environ.get('INTEGRATION_TESTS_USE_GLOBAL_CONFIG', 'false').lower() == 'true':
|
||||
config.file.paths.update(CONFIG_DEFAULTS.paths | {'cache_dir': tempdir})
|
||||
config_path = os.path.join(tempdir, 'kupferbootstrap.toml')
|
||||
config.runtime.config_file = config_path
|
||||
if not os.path.exists(config_path):
|
||||
config.write()
|
||||
config.try_load_file(config_path)
|
||||
print(f'cache_dir: {config.file.paths.cache_dir}')
|
||||
return click.Context(click.Command('integration_tests'))
|
||||
|
||||
|
||||
def test_main_import():
|
||||
from main import cli
|
||||
assert cli
|
||||
|
||||
|
||||
def test_config_load(ctx: click.Context):
|
||||
path = config.runtime.config_file
|
||||
assert path
|
||||
assert path.startswith('/tmp/')
|
||||
assert os.path.exists(path)
|
||||
config.enforce_config_loaded()
|
||||
|
||||
|
||||
def test_packages_update(ctx: click.Context):
|
||||
pkgbuilds_path = config.get_path('pkgbuilds')
|
||||
kbs_branch = git_get_branch(config.runtime.script_source_dir)
|
||||
# Gitlab CI integration: the CI checks out a detached commit, branch comes back empty.
|
||||
if not kbs_branch and os.environ.get('CI', 'false') == 'true':
|
||||
kbs_branch = os.environ.get('CI_COMMIT_BRANCH', '')
|
||||
branches: dict[str, bool] = {'main': False, 'dev': False}
|
||||
if kbs_branch:
|
||||
branches[kbs_branch] = True
|
||||
for branch, may_fail in branches.items():
|
||||
config.file.pkgbuilds.git_branch = branch
|
||||
try:
|
||||
ctx.invoke(cmd_init, update=True, non_interactive=True, switch_branch=True, discard_changes=True, init_caches=False)
|
||||
except Exception as ex:
|
||||
print(f'may_fail: {may_fail}; Exception: {ex}')
|
||||
if not may_fail:
|
||||
raise ex
|
||||
# check branch really doesn't exist
|
||||
res = run_cmd(f"git ls-remote {CONFIG_DEFAULTS.pkgbuilds.git_repo} 'refs/heads/*' | grep 'refs/heads/{branch}'")
|
||||
assert isinstance(res, CompletedProcess)
|
||||
assert res.returncode != 0
|
||||
continue
|
||||
assert git_get_branch(pkgbuilds_path) == branch
|
||||
|
||||
|
||||
def test_packages_clean(ctx: click.Context):
|
||||
if not glob(os.path.join(config.get_path('pkgbuilds'), '*', '*', SRCINFO_METADATA_FILE)):
|
||||
ctx.invoke(cmd_update, non_interactive=True)
|
||||
ctx.invoke(cmd_clean, what=['git'], force=True)
|
||||
|
||||
|
||||
def test_packages_cache_init(ctx: click.Context):
|
||||
ctx.invoke(cmd_update, non_interactive=True, switch_branch=False, discard_changes=False, init_caches=True)
|
||||
|
||||
for f in SRCINFO_CACHE_FILES:
|
||||
assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), PKG_TEST_PATH, f))
|
||||
|
||||
|
||||
def build_pkgs(_ctx: click.Context, query: list[str], arch: str = 'aarch64', **kwargs):
|
||||
_ctx.invoke(cmd_build, paths=query, arch=arch, **kwargs)
|
||||
|
||||
|
||||
def test_packages_build_by_path(ctx: click.Context):
|
||||
build_pkgs(ctx, [PKG_TEST_PATH], force=True)
|
||||
|
||||
|
||||
def test_split_package_build_by_name(ctx: click.Context):
|
||||
build_pkgs(ctx, [PKG_TEST_NAME])
|
||||
@@ -1 +0,0 @@
|
||||
../../wrapper_su_helper.py
|
||||
32
local/update-pacman-files.sh
Executable file
32
local/update-pacman-files.sh
Executable file
@@ -0,0 +1,32 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
wget https://raw.githubusercontent.com/archlinuxarm/PKGBUILDs/master/core/pacman/makepkg.conf -O etc/makepkg.conf
|
||||
sed -i "s/@CARCH@/aarch64/g" etc/makepkg.conf
|
||||
sed -i "s/@CHOST@/aarch64-unknown-linux-gnu/g" etc/makepkg.conf
|
||||
sed -i "s/@CARCHFLAGS@/-march=armv8-a /g" etc/makepkg.conf
|
||||
sed -i "s/xz /xz -T0 /g" etc/makepkg.conf
|
||||
sed -i "s/ check / !check /g" etc/makepkg.conf
|
||||
chroot="/chroot/base_aarch64"
|
||||
include="-I\${CROOT}/usr/include -I$chroot/usr/include"
|
||||
lib_croot="\${CROOT}/lib"
|
||||
lib_chroot="$chroot/usr/lib"
|
||||
cat >>etc/makepkg.conf <<EOF
|
||||
|
||||
export CROOT="/usr/aarch64-linux-gnu"
|
||||
export ARCH="arm64"
|
||||
export CROSS_COMPILE="aarch64-linux-gnu-"
|
||||
export CC="aarch64-linux-gnu-gcc $include -L$lib_croot -L$lib_chroot"
|
||||
export CXX="aarch64-linux-gnu-g++ $include -L$lib_croot -L$lib_chroot"
|
||||
export CFLAGS="\$CFLAGS $include"
|
||||
export CXXFLAGS="\$CXXFLAGS $include"
|
||||
export LDFLAGS="\$LDFLAGS,-L$lib_croot,-L$lib_chroot,-rpath-link,$lib_croot,-rpath-link,$lib_chroot"
|
||||
export PACMAN_CHROOT="$chroot"
|
||||
EOF
|
||||
# TODO: Set PACKAGER
|
||||
wget https://raw.githubusercontent.com/archlinuxarm/PKGBUILDs/master/core/pacman/pacman.conf -O etc/pacman.conf
|
||||
sed -i "s/@CARCH@/aarch64/g" etc/pacman.conf
|
||||
sed -i "s/#ParallelDownloads.*/ParallelDownloads = 8/g" etc/pacman.conf
|
||||
sed -i "s/SigLevel.*/SigLevel = Never/g" etc/pacman.conf
|
||||
sed -i "s/^CheckSpace/#CheckSpace/g" etc/pacman.conf
|
||||
sed -i "s|Include = /etc/pacman.d/mirrorlist|Server = http://mirror.archlinuxarm.org/\$arch/\$repo|g" etc/pacman.conf
|
||||
27
logger.py
27
logger.py
@@ -3,13 +3,11 @@ import coloredlogs
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from typing import Optional
|
||||
|
||||
|
||||
def setup_logging(verbose: bool, quiet: bool = False, force_colors: Optional[bool] = None, log_setup: bool = True):
|
||||
def setup_logging(verbose: bool, log_setup: bool = True):
|
||||
level_colors = coloredlogs.DEFAULT_LEVEL_STYLES | {'info': {'color': 'magenta', 'bright': True}, 'debug': {'color': 'blue', 'bright': True}}
|
||||
field_colors = coloredlogs.DEFAULT_FIELD_STYLES | {'asctime': {'color': 'white', 'faint': True}}
|
||||
level = logging.DEBUG if verbose and not quiet else (logging.INFO if not quiet else logging.ERROR)
|
||||
level = logging.DEBUG if verbose else logging.INFO
|
||||
coloredlogs.install(
|
||||
stream=sys.stdout,
|
||||
fmt='%(asctime)s %(levelname)s: %(message)s',
|
||||
@@ -17,14 +15,9 @@ def setup_logging(verbose: bool, quiet: bool = False, force_colors: Optional[boo
|
||||
level=level,
|
||||
level_styles=level_colors,
|
||||
field_styles=field_colors,
|
||||
isatty=force_colors,
|
||||
)
|
||||
# don't raise Exceptions when e.g. output stream is closed
|
||||
logging.raiseExceptions = False
|
||||
if log_setup:
|
||||
logging.debug('Logger: Logging set up.')
|
||||
if force_colors is not None:
|
||||
logging.debug(f'Logger: Force-{"en" if force_colors else "dis"}abled colors')
|
||||
logging.debug('Logging set up.')
|
||||
|
||||
|
||||
verbose_option = click.option(
|
||||
@@ -33,17 +26,3 @@ verbose_option = click.option(
|
||||
is_flag=True,
|
||||
help='Enables verbose logging',
|
||||
)
|
||||
|
||||
quiet_option = click.option(
|
||||
'-q',
|
||||
'--quiet',
|
||||
is_flag=True,
|
||||
help='Disable most logging, only log errors. (Currently only affects KBS logging, not called subprograms)',
|
||||
)
|
||||
|
||||
color_option = click.option(
|
||||
'--force-colors/--no-colors',
|
||||
is_flag=True,
|
||||
default=None,
|
||||
help='Force enable/disable log coloring. Defaults to autodetection.',
|
||||
)
|
||||
|
||||
88
main.py
88
main.py
@@ -3,54 +3,35 @@
|
||||
import click
|
||||
import subprocess
|
||||
|
||||
from os import isatty
|
||||
from traceback import format_exc, format_exception_only, format_tb
|
||||
from traceback import format_exc as get_trace
|
||||
from typing import Optional
|
||||
|
||||
from logger import color_option, logging, quiet_option, setup_logging, verbose_option
|
||||
from wrapper import get_wrapper_type, enforce_wrap, nowrapper_option
|
||||
from progressbar import progress_bars_option
|
||||
|
||||
from binfmt.cli import cmd_binfmt
|
||||
from config.cli import config, config_option, cmd_config
|
||||
from packages.cli import cmd_packages
|
||||
from flavours.cli import cmd_flavours
|
||||
from devices.cli import cmd_devices
|
||||
from net.cli import cmd_net
|
||||
from chroot.cli import cmd_chroot
|
||||
from cache.cli import cmd_cache
|
||||
from image.cli import cmd_image
|
||||
from logger import logging, setup_logging, verbose_option
|
||||
from wrapper import nowrapper_option, enforce_wrap
|
||||
from config import config, config_option, cmd_config
|
||||
from forwarding import cmd_forwarding
|
||||
from packages import cmd_packages
|
||||
from telnet import cmd_telnet
|
||||
from chroot import cmd_chroot
|
||||
from cache import cmd_cache
|
||||
from image import cmd_image
|
||||
from boot import cmd_boot
|
||||
from flash import cmd_flash
|
||||
from ssh import cmd_ssh
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.option('--error-shell', '-E', 'error_shell', is_flag=True, default=False, help='Spawn shell after error occurs')
|
||||
@verbose_option
|
||||
@quiet_option
|
||||
@config_option
|
||||
@nowrapper_option
|
||||
@color_option
|
||||
@progress_bars_option
|
||||
def cli(
|
||||
verbose: bool = False,
|
||||
quiet: bool = False,
|
||||
config_file: Optional[str] = None,
|
||||
wrapper_override: Optional[bool] = None,
|
||||
error_shell: bool = False,
|
||||
force_colors: Optional[bool] = None,
|
||||
force_progress_bars: Optional[bool] = None,
|
||||
):
|
||||
setup_logging(verbose, quiet=quiet, force_colors=force_colors)
|
||||
# stdout is fd 1
|
||||
config.runtime.colors = isatty(1) if force_colors is None else force_colors
|
||||
config.runtime.verbose = verbose
|
||||
config.runtime.progress_bars = force_progress_bars
|
||||
config.runtime.no_wrap = wrapper_override is False
|
||||
config.runtime.error_shell = error_shell
|
||||
def cli(verbose: bool = False, config_file: str = None, wrapper_override: Optional[bool] = None, error_shell: bool = False):
|
||||
setup_logging(verbose)
|
||||
config.runtime['verbose'] = verbose
|
||||
config.runtime['no_wrap'] = wrapper_override is False
|
||||
config.runtime['error_shell'] = error_shell
|
||||
config.try_load_file(config_file)
|
||||
if config.file_state.exception:
|
||||
logging.warning(f"Config file couldn't be loaded: {config.file_state.exception}")
|
||||
if wrapper_override:
|
||||
logging.info(f'Force-wrapping in wrapper-type: "{get_wrapper_type()}"!')
|
||||
enforce_wrap()
|
||||
|
||||
|
||||
@@ -58,35 +39,26 @@ def main():
|
||||
try:
|
||||
return cli(prog_name='kupferbootstrap')
|
||||
except Exception as ex:
|
||||
if config.runtime.verbose:
|
||||
msg = format_exc()
|
||||
if config.runtime['verbose']:
|
||||
logging.fatal(get_trace())
|
||||
else:
|
||||
tb_start = ''.join(format_tb(ex.__traceback__, limit=1)).strip('\n')
|
||||
tb_end = ''.join(format_tb(ex.__traceback__, limit=-1)).strip('\n')
|
||||
short_tb = [
|
||||
'Traceback (most recent call last):',
|
||||
tb_start,
|
||||
'[...]',
|
||||
tb_end,
|
||||
format_exception_only(ex)[-1], # type: ignore[arg-type]
|
||||
]
|
||||
msg = '\n'.join(short_tb)
|
||||
logging.fatal('\n' + msg)
|
||||
if config.runtime.error_shell:
|
||||
logging.fatal(ex)
|
||||
if config.runtime['error_shell']:
|
||||
logging.info('Starting error shell. Type exit to quit.')
|
||||
subprocess.call('/bin/bash')
|
||||
exit(1)
|
||||
|
||||
|
||||
cli.add_command(cmd_binfmt)
|
||||
cli.add_command(cmd_cache)
|
||||
cli.add_command(cmd_chroot)
|
||||
cli.add_command(cmd_config)
|
||||
cli.add_command(cmd_devices)
|
||||
cli.add_command(cmd_flavours)
|
||||
cli.add_command(cmd_image)
|
||||
cli.add_command(cmd_net)
|
||||
cli.add_command(cmd_cache)
|
||||
cli.add_command(cmd_packages)
|
||||
cli.add_command(cmd_image)
|
||||
cli.add_command(cmd_boot)
|
||||
cli.add_command(cmd_flash)
|
||||
cli.add_command(cmd_ssh)
|
||||
cli.add_command(cmd_forwarding)
|
||||
cli.add_command(cmd_telnet)
|
||||
cli.add_command(cmd_chroot)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
import click
|
||||
|
||||
from .forwarding import cmd_forwarding
|
||||
from .ssh import cmd_ssh
|
||||
from .telnet import cmd_telnet
|
||||
|
||||
cmd_net = click.Group('net', help='Network utilities like ssh and telnet')
|
||||
for cmd in cmd_forwarding, cmd_ssh, cmd_telnet:
|
||||
cmd_net.add_command(cmd)
|
||||
1004
packages/__init__.py
1004
packages/__init__.py
File diff suppressed because it is too large
Load Diff
@@ -1,875 +0,0 @@
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from copy import deepcopy
|
||||
from urllib.error import HTTPError
|
||||
from typing import Iterable, Iterator, Optional
|
||||
|
||||
from binfmt.binfmt import binfmt_is_registered, binfmt_register
|
||||
from constants import CROSSDIRECT_PKGS, QEMU_BINFMT_PKGS, GCC_HOSTSPECS, ARCHES, Arch, CHROOT_PATHS, MAKEPKG_CMD
|
||||
from config.state import config
|
||||
from exec.cmd import run_cmd, run_root_cmd
|
||||
from exec.file import makedir, remove_file, symlink
|
||||
from chroot.build import get_build_chroot, BuildChroot
|
||||
from distro.distro import get_kupfer_https, get_kupfer_local, get_kupfer_repo_names
|
||||
from distro.package import RemotePackage, LocalPackage
|
||||
from distro.repo import LocalRepo
|
||||
from progressbar import BAR_PADDING, get_levels_bar
|
||||
from wrapper import check_programs_wrap, is_wrapped
|
||||
from utils import ellipsize, sha256sum
|
||||
|
||||
from .pkgbuild import discover_pkgbuilds, filter_pkgbuilds, Pkgbase, Pkgbuild, SubPkgbuild
|
||||
|
||||
pacman_cmd = [
|
||||
'pacman',
|
||||
'-Syuu',
|
||||
'--noconfirm',
|
||||
'--overwrite=*',
|
||||
'--needed',
|
||||
]
|
||||
|
||||
|
||||
def get_makepkg_env(arch: Optional[Arch] = None):
|
||||
# has to be a function because calls to `config` must be done after config file was read
|
||||
threads = config.file.build.threads or multiprocessing.cpu_count()
|
||||
# env = {key: val for key, val in os.environ.items() if not key.split('_', maxsplit=1)[0] in ['CI', 'GITLAB', 'FF']}
|
||||
env = {
|
||||
'LANG': 'C',
|
||||
'CARGO_BUILD_JOBS': str(threads),
|
||||
'MAKEFLAGS': f"-j{threads}",
|
||||
'PATH': '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
|
||||
}
|
||||
native = config.runtime.arch
|
||||
assert native
|
||||
if arch and arch != native:
|
||||
env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][arch]}'}
|
||||
return env
|
||||
|
||||
|
||||
def init_local_repo(repo: str, arch: Arch):
|
||||
repo_dir = os.path.join(config.get_package_dir(arch), repo)
|
||||
if not os.path.exists(repo_dir):
|
||||
logging.info(f'Creating local repo "{repo}" ({arch})')
|
||||
makedir(repo_dir)
|
||||
for ext in ['db', 'files']:
|
||||
filename_stripped = f'{repo}.{ext}'
|
||||
filename = f'{filename_stripped}.tar.xz'
|
||||
if not os.path.exists(os.path.join(repo_dir, filename)):
|
||||
logging.info(f'Initialising local repo {f"{ext} " if ext != "db" else ""}db for repo "{repo}" ({arch})')
|
||||
result = run_cmd(
|
||||
[
|
||||
'tar',
|
||||
'-czf',
|
||||
filename,
|
||||
'-T',
|
||||
'/dev/null',
|
||||
],
|
||||
cwd=os.path.join(repo_dir),
|
||||
)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to create local repo "{repo}"')
|
||||
symlink_path = os.path.join(repo_dir, filename_stripped)
|
||||
if not os.path.islink(symlink_path):
|
||||
if os.path.exists(symlink_path):
|
||||
remove_file(symlink_path)
|
||||
symlink(filename, symlink_path)
|
||||
|
||||
|
||||
def init_prebuilts(arch: Arch):
|
||||
"""Ensure that all `constants.REPOSITORIES` inside `dir` exist"""
|
||||
prebuilts_dir = config.get_path('packages')
|
||||
makedir(prebuilts_dir)
|
||||
for repo in get_kupfer_repo_names(local=True):
|
||||
init_local_repo(repo, arch)
|
||||
|
||||
|
||||
def generate_dependency_chain(package_repo: dict[str, Pkgbuild], to_build: Iterable[Pkgbuild]) -> list[set[Pkgbuild]]:
|
||||
"""
|
||||
This figures out all dependencies and their sub-dependencies for the selection and adds those packages to the selection.
|
||||
First the top-level packages get selected by searching the paths.
|
||||
Then their dependencies and sub-dependencies and so on get added to the selection.
|
||||
"""
|
||||
visited = set[Pkgbuild]()
|
||||
visited_names = set[str]()
|
||||
dep_levels: list[set[Pkgbuild]] = [set(), set()]
|
||||
|
||||
def visit(package: Pkgbuild, visited=visited, visited_names=visited_names):
|
||||
visited.add(package)
|
||||
visited_names.update(package.names())
|
||||
|
||||
def join_levels(levels: list[set[Pkgbuild]]) -> dict[Pkgbuild, int]:
|
||||
result = dict[Pkgbuild, int]()
|
||||
for i, level in enumerate(levels):
|
||||
for pkg in level:
|
||||
result[pkg] = i
|
||||
return result
|
||||
|
||||
def get_dependencies(package: Pkgbuild, package_repo: dict[str, Pkgbuild] = package_repo) -> Iterator[Pkgbuild]:
|
||||
for dep_name in package.depends:
|
||||
if dep_name in visited_names:
|
||||
continue
|
||||
elif dep_name in package_repo:
|
||||
dep_pkg = package_repo[dep_name]
|
||||
visit(dep_pkg)
|
||||
yield dep_pkg
|
||||
|
||||
def get_recursive_dependencies(package: Pkgbuild, package_repo: dict[str, Pkgbuild] = package_repo) -> Iterator[Pkgbuild]:
|
||||
for pkg in get_dependencies(package, package_repo):
|
||||
yield pkg
|
||||
for sub_pkg in get_recursive_dependencies(pkg, package_repo):
|
||||
yield sub_pkg
|
||||
|
||||
logging.debug('Generating dependency chain:')
|
||||
# init level 0
|
||||
for package in to_build:
|
||||
visit(package)
|
||||
dep_levels[0].add(package)
|
||||
logging.debug(f'Adding requested package {package.name}')
|
||||
# add dependencies of our requested builds to level 0
|
||||
for dep_pkg in get_recursive_dependencies(package):
|
||||
logging.debug(f"Adding {package.name}'s dependency {dep_pkg.name} to level 0")
|
||||
dep_levels[0].add(dep_pkg)
|
||||
visit(dep_pkg)
|
||||
"""
|
||||
Starting with `level` = 0, iterate over the packages in `dep_levels[level]`:
|
||||
1. Moving packages that are dependencies of other packages up to `level`+1
|
||||
2. Adding yet unadded local dependencies of all pkgs on `level` to `level`+1
|
||||
3. increment level
|
||||
"""
|
||||
level = 0
|
||||
# protect against dependency cycles
|
||||
repeat_count = 0
|
||||
_last_level: Optional[set[Pkgbuild]] = None
|
||||
while dep_levels[level]:
|
||||
level_copy = dep_levels[level].copy()
|
||||
modified = False
|
||||
logging.debug(f'Scanning dependency level {level}')
|
||||
if level > 100:
|
||||
raise Exception('Dependency chain reached 100 levels depth, this is probably a bug. Aborting!')
|
||||
|
||||
for pkg in level_copy:
|
||||
pkg_done = False
|
||||
if pkg not in dep_levels[level]:
|
||||
# pkg has been moved, move on
|
||||
continue
|
||||
# move pkg to level+1 if something else depends on it
|
||||
for other_pkg in level_copy:
|
||||
if pkg == other_pkg:
|
||||
continue
|
||||
if pkg_done:
|
||||
break
|
||||
if not issubclass(type(other_pkg), Pkgbuild):
|
||||
raise Exception('Not a Pkgbuild object:' + repr(other_pkg))
|
||||
for dep_name in other_pkg.depends:
|
||||
if dep_name in pkg.names():
|
||||
dep_levels[level].remove(pkg)
|
||||
dep_levels[level + 1].add(pkg)
|
||||
logging.debug(f'Moving {pkg.name} to level {level+1} because {other_pkg.name} depends on it as {dep_name}')
|
||||
modified = True
|
||||
pkg_done = True
|
||||
break
|
||||
for dep_name in pkg.depends:
|
||||
if dep_name in visited_names:
|
||||
continue
|
||||
elif dep_name in package_repo:
|
||||
dep_pkg = package_repo[dep_name]
|
||||
logging.debug(f"Adding {pkg.name}'s dependency {dep_name} to level {level}")
|
||||
dep_levels[level].add(dep_pkg)
|
||||
visit(dep_pkg)
|
||||
modified = True
|
||||
|
||||
if _last_level == dep_levels[level]:
|
||||
repeat_count += 1
|
||||
else:
|
||||
repeat_count = 0
|
||||
if repeat_count > 10:
|
||||
raise Exception(f'Probable dependency cycle detected: Level has been passed on unmodifed multiple times: #{level}: {_last_level}')
|
||||
_last_level = dep_levels[level].copy()
|
||||
if not modified: # if the level was modified, make another pass.
|
||||
level += 1
|
||||
dep_levels.append(set[Pkgbuild]())
|
||||
# reverse level list into buildorder (deps first!), prune empty levels
|
||||
return list([lvl for lvl in dep_levels[::-1] if lvl])
|
||||
|
||||
|
||||
def add_file_to_repo(file_path: str, repo_name: str, arch: Arch, remove_original: bool = True):
|
||||
check_programs_wrap(['repo-add'])
|
||||
repo_dir = os.path.join(config.get_package_dir(arch), repo_name)
|
||||
pacman_cache_dir = os.path.join(config.get_path('pacman'), arch)
|
||||
file_name = os.path.basename(file_path)
|
||||
target_file = os.path.join(repo_dir, file_name)
|
||||
|
||||
init_local_repo(repo_name, arch)
|
||||
if file_path != target_file:
|
||||
logging.debug(f'moving {file_path} to {target_file} ({repo_dir})')
|
||||
shutil.copy(
|
||||
file_path,
|
||||
repo_dir,
|
||||
)
|
||||
if remove_original:
|
||||
remove_file(file_path)
|
||||
|
||||
# clean up same name package from pacman cache
|
||||
cache_file = os.path.join(pacman_cache_dir, file_name)
|
||||
if os.path.exists(cache_file):
|
||||
logging.debug(f"Removing cached package file {cache_file}")
|
||||
remove_file(cache_file)
|
||||
cmd = [
|
||||
'repo-add',
|
||||
'--remove',
|
||||
os.path.join(
|
||||
repo_dir,
|
||||
f'{repo_name}.db.tar.xz',
|
||||
),
|
||||
target_file,
|
||||
]
|
||||
logging.debug(f'repo: running cmd: {cmd}')
|
||||
result = run_cmd(cmd, stderr=sys.stdout)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed add package {target_file} to repo {repo_name}')
|
||||
for ext in ['db', 'files']:
|
||||
old = os.path.join(repo_dir, f'{repo_name}.{ext}.tar.xz.old')
|
||||
if os.path.exists(old):
|
||||
remove_file(old)
|
||||
|
||||
|
||||
def strip_compression_extension(filename: str):
|
||||
for ext in ['zst', 'xz', 'gz', 'bz2']:
|
||||
if filename.endswith(f'.pkg.tar.{ext}'):
|
||||
return filename[:-(len(ext) + 1)]
|
||||
logging.debug(f"file {filename} matches no known package extension")
|
||||
return filename
|
||||
|
||||
|
||||
def add_package_to_repo(package: Pkgbuild, arch: Arch):
|
||||
logging.info(f'Adding {package.path} to repo {package.repo}')
|
||||
pkgbuild_dir = os.path.join(config.get_path('pkgbuilds'), package.path)
|
||||
|
||||
files = []
|
||||
for file in os.listdir(pkgbuild_dir):
|
||||
# Forced extension by makepkg.conf
|
||||
pkgext = '.pkg.tar'
|
||||
if pkgext not in file:
|
||||
continue
|
||||
stripped_name = strip_compression_extension(file)
|
||||
if not stripped_name.endswith(pkgext):
|
||||
continue
|
||||
|
||||
repo_file = os.path.join(config.get_package_dir(arch), package.repo, file)
|
||||
files.append(repo_file)
|
||||
add_file_to_repo(os.path.join(pkgbuild_dir, file), package.repo, arch)
|
||||
|
||||
# copy any-arch packages to other repos as well
|
||||
if stripped_name.endswith(f'-any{pkgext}'):
|
||||
for repo_arch in ARCHES:
|
||||
if repo_arch == arch:
|
||||
continue # done already
|
||||
add_file_to_repo(repo_file, package.repo, repo_arch, remove_original=False)
|
||||
|
||||
return files
|
||||
|
||||
|
||||
def try_download_package(dest_file_path: str, package: Pkgbuild, arch: Arch) -> Optional[str]:
|
||||
filename = os.path.basename(dest_file_path)
|
||||
logging.debug(f"checking if we can download {filename}")
|
||||
pkgname = package.name
|
||||
repo_name = package.repo
|
||||
repos = get_kupfer_https(arch, scan=True).repos
|
||||
if repo_name not in repos:
|
||||
logging.warning(f"Repository {repo_name} is not a known HTTPS repo")
|
||||
return None
|
||||
repo = repos[repo_name]
|
||||
if pkgname not in repo.packages:
|
||||
logging.warning(f"Package {pkgname} not found in remote repos, building instead.")
|
||||
return None
|
||||
repo_pkg: RemotePackage = repo.packages[pkgname]
|
||||
if repo_pkg.version != package.version:
|
||||
logging.debug(f"Package {pkgname} versions differ: local: {package.version}, "
|
||||
f"remote: {repo_pkg.version}. Building instead.")
|
||||
return None
|
||||
if repo_pkg.filename != filename:
|
||||
versions_str = f"local: {filename}, remote: {repo_pkg.filename}"
|
||||
if strip_compression_extension(repo_pkg.filename) != strip_compression_extension(filename):
|
||||
logging.debug(f"package filenames don't match: {versions_str}")
|
||||
return None
|
||||
logging.debug(f"ignoring compression extension difference: {versions_str}")
|
||||
cache_file = os.path.join(config.get_path('pacman'), arch, repo_pkg.filename)
|
||||
if os.path.exists(cache_file):
|
||||
if not repo_pkg._desc or 'SHA256SUM' not in repo_pkg._desc:
|
||||
cache_matches = False
|
||||
extra_msg = ". However, we can't validate it, as the https repo doesnt provide a SHA256SUM for it."
|
||||
else:
|
||||
cache_matches = sha256sum(cache_file) == repo_pkg._desc['SHA256SUM']
|
||||
extra_msg = (". However its checksum doesn't match." if not cache_matches else " and its checksum matches.")
|
||||
logging.debug(f"While checking the HTTPS repo DB, we found a matching filename in the pacman cache{extra_msg}")
|
||||
if cache_matches:
|
||||
logging.info(f'copying cache file {cache_file} to repo as verified by remote checksum')
|
||||
shutil.copy(cache_file, dest_file_path)
|
||||
remove_file(cache_file)
|
||||
return dest_file_path
|
||||
url = repo_pkg.resolved_url
|
||||
assert url
|
||||
try:
|
||||
path = repo_pkg.acquire()
|
||||
assert os.path.exists(path)
|
||||
return path
|
||||
except HTTPError as e:
|
||||
if e.code == 404:
|
||||
logging.debug(f"remote package {filename} missing on server: {url}")
|
||||
else:
|
||||
logging.error(f"remote package {filename} failed to download ({e.code}): {url}: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def check_package_version_built(
|
||||
package: Pkgbuild,
|
||||
arch: Arch,
|
||||
try_download: bool = False,
|
||||
refresh_sources: bool = False,
|
||||
) -> bool:
|
||||
logging.info(f"Checking if {package.name} is built for architecture {arch}")
|
||||
|
||||
if refresh_sources:
|
||||
setup_sources(package)
|
||||
|
||||
missing = True
|
||||
filename = package.get_filename(arch)
|
||||
filename_stripped = strip_compression_extension(filename)
|
||||
local_repo: Optional[LocalRepo] = None
|
||||
if not filename_stripped.endswith('.pkg.tar'):
|
||||
raise Exception(f'{package.name}: stripped filename has unknown extension. {filename}')
|
||||
logging.debug(f'Checking if {filename_stripped} is built')
|
||||
|
||||
any_arch = filename_stripped.endswith('any.pkg.tar')
|
||||
if any_arch:
|
||||
logging.debug(f"{package.name}: any-arch pkg detected")
|
||||
|
||||
init_prebuilts(arch)
|
||||
# check if DB entry exists and matches PKGBUILD
|
||||
try:
|
||||
local_distro = get_kupfer_local(arch, in_chroot=False, scan=True)
|
||||
if package.repo not in local_distro.repos:
|
||||
raise Exception(f"Repo {package.repo} not found locally")
|
||||
local_repo = local_distro.repos[package.repo]
|
||||
if not local_repo.scanned:
|
||||
local_repo.scan()
|
||||
if package.name not in local_repo.packages:
|
||||
raise Exception(f"Package '{package.name}' not found")
|
||||
binpkg: LocalPackage = local_repo.packages[package.name]
|
||||
if package.version != binpkg.version:
|
||||
raise Exception(f"Versions differ: PKGBUILD: {package.version}, Repo: {binpkg.version}")
|
||||
if binpkg.arch not in (['any'] if package.arches == ['any'] else [arch]):
|
||||
raise Exception(f"Wrong Architecture: {binpkg.arch}, requested: {arch}")
|
||||
assert binpkg.resolved_url
|
||||
filepath = binpkg.resolved_url.split('file://')[1]
|
||||
if filename_stripped != strip_compression_extension(binpkg.filename):
|
||||
raise Exception(f"Repo entry exists but the filename {binpkg.filename} doesn't match expected {filename_stripped}")
|
||||
if not os.path.exists(filepath):
|
||||
raise Exception(f"Repo entry exists but file {filepath} is missing from disk")
|
||||
assert binpkg._desc
|
||||
if 'SHA256SUM' not in binpkg._desc or not binpkg._desc['SHA256SUM']:
|
||||
raise Exception("Repo entry exists but has no checksum")
|
||||
if sha256sum(filepath) != binpkg._desc['SHA256SUM']:
|
||||
raise Exception("Repo entry exists but checksum doesn't match")
|
||||
missing = False
|
||||
file = filepath
|
||||
filename = binpkg.filename
|
||||
logging.debug(f"{filename} found in {package.repo}.db ({arch}) and checksum matches")
|
||||
except Exception as ex:
|
||||
logging.debug(f"Failed to search local repos for package {package.name}: {ex}")
|
||||
|
||||
# file might be in repo directory but not in DB or checksum mismatch
|
||||
for ext in ['xz', 'zst']:
|
||||
if not missing:
|
||||
break
|
||||
file = os.path.join(config.get_package_dir(arch), package.repo, f'{filename_stripped}.{ext}')
|
||||
if not os.path.exists(file):
|
||||
# look for 'any' arch packages in other repos
|
||||
if any_arch:
|
||||
target_repo_file = os.path.join(config.get_package_dir(arch), package.repo, filename)
|
||||
if os.path.exists(target_repo_file):
|
||||
file = target_repo_file
|
||||
missing = False
|
||||
else:
|
||||
# we have to check if another arch's repo holds our any-arch pkg
|
||||
for repo_arch in ARCHES:
|
||||
if repo_arch == arch:
|
||||
continue # we already checked that
|
||||
other_repo_file = os.path.join(config.get_package_dir(repo_arch), package.repo, filename)
|
||||
if os.path.exists(other_repo_file):
|
||||
logging.info(f"package {file} found in {repo_arch} repo, copying to {arch}")
|
||||
file = other_repo_file
|
||||
missing = False
|
||||
if try_download and missing:
|
||||
downloaded = try_download_package(file, package, arch)
|
||||
if downloaded:
|
||||
file = downloaded
|
||||
filename = os.path.basename(file)
|
||||
missing = False
|
||||
logging.info(f"Successfully downloaded {filename} from HTTPS mirror")
|
||||
if os.path.exists(file):
|
||||
missing = False
|
||||
add_file_to_repo(file, repo_name=package.repo, arch=arch, remove_original=False)
|
||||
assert local_repo
|
||||
local_repo.scan()
|
||||
# copy arch=(any) packages to all arches
|
||||
if any_arch and not missing:
|
||||
# copy to other arches if they don't have it
|
||||
for repo_arch in ARCHES:
|
||||
if repo_arch == arch:
|
||||
continue # we already have that
|
||||
copy_target = os.path.join(config.get_package_dir(repo_arch), package.repo, filename)
|
||||
if not os.path.exists(copy_target):
|
||||
logging.info(f"copying any-arch package {package.name} to {repo_arch} repo: {copy_target}")
|
||||
add_file_to_repo(file, package.repo, repo_arch, remove_original=False)
|
||||
other_repo = get_kupfer_local(repo_arch, in_chroot=False, scan=False).repos.get(package.repo, None)
|
||||
if other_repo and other_repo.scanned:
|
||||
other_repo.scan()
|
||||
return not missing
|
||||
|
||||
|
||||
def setup_build_chroot(
|
||||
arch: Arch,
|
||||
extra_packages: list[str] = [],
|
||||
add_kupfer_repos: bool = True,
|
||||
clean_chroot: bool = False,
|
||||
repo: Optional[dict[str, Pkgbuild]] = None,
|
||||
) -> BuildChroot:
|
||||
assert config.runtime.arch
|
||||
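# Building for a foreign architecture requires qemu-user binfmt handlers to be
# registered on the host first, so foreign binaries can run inside the chroot.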
if arch != config.runtime.arch:
|
||||
build_enable_qemu_binfmt(arch, repo=repo or discover_pkgbuilds(), lazy=False)
|
||||
init_prebuilts(arch)
|
||||
chroot = get_build_chroot(arch, add_kupfer_repos=add_kupfer_repos)
|
||||
chroot.mount_packages()
|
||||
logging.debug(f'Initializing {arch} build chroot')
|
||||
chroot.initialize(reset=clean_chroot)
|
||||
chroot.write_pacman_conf() # in case it was initialized with different repos
|
||||
chroot.activate()
|
||||
chroot.mount_pacman_cache()
|
||||
chroot.mount_pkgbuilds()
|
||||
if extra_packages:
|
||||
chroot.try_install_packages(extra_packages, allow_fail=False)
|
||||
assert config.runtime.uid is not None
|
||||
chroot.create_user('kupfer', password='12345678', uid=config.runtime.uid, non_unique=True)
|
||||
if not os.path.exists(chroot.get_path('/etc/sudoers.d/kupfer_nopw')):
|
||||
chroot.add_sudo_config('kupfer_nopw', 'kupfer', password_required=False)
|
||||
|
||||
return chroot
|
||||
|
||||
|
||||
def setup_git_insecure_paths(chroot: BuildChroot, username: str = 'kupfer'):
|
||||
chroot.run_cmd(
|
||||
["git", "config", "--global", "--add", "safe.directory", "'*'"],
|
||||
switch_user=username,
|
||||
).check_returncode() # type: ignore[union-attr]
|
||||
|
||||
|
||||
def setup_sources(package: Pkgbuild, lazy: bool = True):
|
||||
cache = package.srcinfo_cache
|
||||
assert cache
|
||||
# catch cache._changed: if the PKGBUILD changed whatsoever, that's an indicator the sources might be changed
|
||||
if lazy and not cache._changed and cache.is_src_initialised():
|
||||
if cache.validate_checksums():
|
||||
logging.info(f"{package.path}: Sources already set up.")
|
||||
return
|
||||
makepkg_setup = MAKEPKG_CMD + [
|
||||
'--nodeps',
|
||||
'--nobuild',
|
||||
'--noprepare',
|
||||
'--skippgpcheck',
|
||||
]
|
||||
|
||||
logging.info(f'{package.path}: Getting build chroot for source setup')
|
||||
# we need to use a chroot here because makepkg symlinks sources into src/ via an absolute path
|
||||
dir = os.path.join(CHROOT_PATHS['pkgbuilds'], package.path)
|
||||
assert config.runtime.arch
|
||||
chroot = setup_build_chroot(config.runtime.arch)
|
||||
logging.info(f'{package.path}: Setting up sources with makepkg')
|
||||
result = chroot.run_cmd(makepkg_setup, cwd=dir, switch_user='kupfer', stderr=sys.stdout)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'{package.path}: Failed to setup sources, exit code: {result.returncode}')
|
||||
cache.refresh_all(write=True)
|
||||
cache.write_src_initialised()
|
||||
old_version = package.version
|
||||
package.refresh_sources()
|
||||
if package.version != old_version:
|
||||
logging.info(f"{package.path}: version refreshed from {old_version} to {package.version}")
|
||||
|
||||
|
||||
def build_package(
|
||||
package: Pkgbuild,
|
||||
arch: Arch,
|
||||
repo_dir: Optional[str] = None,
|
||||
enable_crosscompile: bool = True,
|
||||
enable_crossdirect: bool = True,
|
||||
enable_ccache: bool = True,
|
||||
clean_chroot: bool = False,
|
||||
build_user: str = 'kupfer',
|
||||
repo: Optional[dict[str, Pkgbuild]] = None,
|
||||
):
|
||||
makepkg_compile_opts = ['--holdver']
|
||||
makepkg_conf_path = 'etc/makepkg.conf'
|
||||
repo_dir = repo_dir if repo_dir else config.get_path('pkgbuilds')
|
||||
foreign_arch = config.runtime.arch != arch
|
||||
deps = list(package.makedepends)
|
||||
names = set(package.names())
|
||||
if isinstance(package, SubPkgbuild):
|
||||
names |= set(package.pkgbase.names())
|
||||
if not package.nodeps:
|
||||
deps += list(package.depends)
|
||||
deps = list(set(deps) - names)
|
||||
needs_rust = 'rust' in deps
|
||||
logging.info(f"{package.path}: Preparing to build: getting native arch build chroot")
|
||||
build_root: BuildChroot
|
||||
target_chroot = setup_build_chroot(
|
||||
arch=arch,
|
||||
extra_packages=deps,
|
||||
clean_chroot=clean_chroot,
|
||||
repo=repo,
|
||||
)
|
||||
assert config.runtime.arch
|
||||
native_chroot = target_chroot
|
||||
if foreign_arch:
|
||||
logging.info(f"{package.path}: Preparing to build: getting {arch} build chroot")
|
||||
native_chroot = setup_build_chroot(
|
||||
arch=config.runtime.arch,
|
||||
extra_packages=['base-devel'] + CROSSDIRECT_PKGS,
|
||||
clean_chroot=clean_chroot,
|
||||
repo=repo,
|
||||
)
|
||||
if not package.mode:
|
||||
logging.warning(f'Package {package.path} has no _mode set, assuming "host"')
|
||||
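# Two strategies below: "cross" builds in the native chroot with a cross-toolchain and
# the target chroot mounted inside it, while host-compiling builds inside the target-arch
# chroot, optionally accelerated by crossdirect from the native chroot.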
cross = foreign_arch and package.mode == 'cross' and enable_crosscompile
|
||||
|
||||
if cross:
|
||||
logging.info(f'Cross-compiling {package.path}')
|
||||
build_root = native_chroot
|
||||
makepkg_compile_opts += ['--nodeps']
|
||||
env = deepcopy(get_makepkg_env(arch))
|
||||
if enable_ccache:
|
||||
env['PATH'] = f"/usr/lib/ccache:{env['PATH']}"
|
||||
native_chroot.mount_ccache(user=build_user)
|
||||
logging.info(f'{package.path}: Setting up dependencies for cross-compilation')
|
||||
# include crossdirect for ccache symlinks and qemu-user
|
||||
cross_deps = list(package.makedepends) if package.nodeps else (deps + CROSSDIRECT_PKGS + [f"{GCC_HOSTSPECS[native_chroot.arch][arch]}-gcc"])
|
||||
results = native_chroot.try_install_packages(cross_deps)
|
||||
if not package.nodeps:
|
||||
res_crossdirect = results['crossdirect']
|
||||
assert isinstance(res_crossdirect, subprocess.CompletedProcess)
|
||||
if res_crossdirect.returncode != 0:
|
||||
raise Exception('Unable to install crossdirect')
|
||||
# mount foreign arch chroot inside native chroot
|
||||
chroot_relative = os.path.join(CHROOT_PATHS['chroots'], target_chroot.name)
|
||||
makepkg_path_absolute = native_chroot.write_makepkg_conf(target_arch=arch, cross_chroot_relative=chroot_relative, cross=True)
|
||||
makepkg_conf_path = os.path.join('etc', os.path.basename(makepkg_path_absolute))
|
||||
native_chroot.mount_crosscompile(target_chroot)
|
||||
else:
|
||||
logging.info(f'Host-compiling {package.path}')
|
||||
build_root = target_chroot
|
||||
makepkg_compile_opts += ['--nodeps' if package.nodeps else '--syncdeps']
|
||||
env = deepcopy(get_makepkg_env(arch))
|
||||
if foreign_arch and package.crossdirect and enable_crossdirect and package.name not in CROSSDIRECT_PKGS:
|
||||
env['PATH'] = f"/native/usr/lib/crossdirect/{arch}:{env['PATH']}"
|
||||
target_chroot.mount_crossdirect(native_chroot)
|
||||
else:
|
||||
if enable_ccache:
|
||||
logging.debug('ccache enabled')
|
||||
env['PATH'] = f"/usr/lib/ccache:{env['PATH']}"
|
||||
deps += ['ccache']
|
||||
logging.debug(('Building for native arch. ' if not foreign_arch else '') + 'Skipping crossdirect.')
|
||||
if not package.nodeps:
|
||||
dep_install = target_chroot.try_install_packages(deps, allow_fail=False)
|
||||
failed_deps = [name for name, res in dep_install.items() if res.returncode != 0] # type: ignore[union-attr]
|
||||
if failed_deps:
|
||||
raise Exception(f'{package.path}: Dependencies failed to install: {failed_deps}')
|
||||
|
||||
if enable_ccache:
|
||||
build_root.mount_ccache(user=build_user)
|
||||
if needs_rust:
|
||||
build_root.mount_rust(user=build_user)
|
||||
setup_git_insecure_paths(build_root)
|
||||
makepkg_conf_absolute = os.path.join('/', makepkg_conf_path)
|
||||
|
||||
build_cmd = ['source', '/etc/profile', '&&', *MAKEPKG_CMD, '--config', makepkg_conf_absolute, '--skippgpcheck', *makepkg_compile_opts]
|
||||
logging.debug(f'Building: Running {build_cmd}')
|
||||
result = build_root.run_cmd(
|
||||
build_cmd,
|
||||
inner_env=env,
|
||||
cwd=os.path.join(CHROOT_PATHS['pkgbuilds'], package.path),
|
||||
switch_user=build_user,
|
||||
stderr=sys.stdout,
|
||||
)
|
||||
assert isinstance(result, subprocess.CompletedProcess)
|
||||
if result.returncode != 0:
|
||||
raise Exception(f'Failed to compile package {package.path}')
|
||||
|
||||
|
||||
def get_dependants(
|
||||
repo: dict[str, Pkgbuild],
|
||||
packages: Iterable[Pkgbuild],
|
||||
arch: Arch,
|
||||
recursive: bool = True,
|
||||
) -> set[Pkgbuild]:
|
||||
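# Collect every PKGBUILD in the repo that depends on one of the given packages
# (matched by name); with recursive=True, dependants of dependants are added too.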
names = set([pkg.name for pkg in packages])
|
||||
to_add = set[Pkgbuild]()
|
||||
for pkg in repo.values():
|
||||
if set.intersection(names, set(pkg.depends)):
|
||||
if not set([arch, 'any']).intersection(pkg.arches):
|
||||
logging.warning(f'get_dependants: skipping matched pkg {pkg.name} due to wrong arch: {pkg.arches}')
|
||||
continue
|
||||
to_add.add(pkg)
|
||||
if recursive and to_add:
|
||||
to_add.update(get_dependants(repo, to_add, arch=arch))
|
||||
return to_add
|
||||
|
||||
|
||||
def get_pkg_names_str(pkgs: Iterable[Pkgbuild]) -> str:
|
||||
return ', '.join(x.name for x in pkgs)
|
||||
|
||||
|
||||
def get_pkg_levels_str(pkg_levels: Iterable[Iterable[Pkgbuild]]):
|
||||
return '\n'.join(f'{i}: {get_pkg_names_str(level)}' for i, level in enumerate(pkg_levels))
|
||||
|
||||
|
||||
def get_unbuilt_package_levels(
|
||||
packages: Iterable[Pkgbuild],
|
||||
arch: Arch,
|
||||
repo: Optional[dict[str, Pkgbuild]] = None,
|
||||
force: bool = False,
|
||||
rebuild_dependants: bool = False,
|
||||
try_download: bool = False,
|
||||
refresh_sources: bool = True,
|
||||
) -> list[set[Pkgbuild]]:
|
||||
repo = repo or discover_pkgbuilds()
|
||||
dependants = set[Pkgbuild]()
|
||||
if rebuild_dependants:
|
||||
dependants = get_dependants(repo, packages, arch=arch)
|
||||
package_levels = generate_dependency_chain(repo, set(packages).union(dependants))
|
||||
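# generate_dependency_chain() orders the packages into levels so that each level only
# depends on packages from earlier levels; the levels are then checked (and later built) in order.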
build_names = set[str]()
|
||||
build_levels = list[set[Pkgbuild]]()
|
||||
includes_dependants = " (includes dependants)" if rebuild_dependants else ""
|
||||
logging.info(f"Checking for unbuilt packages ({arch}) in dependency order{includes_dependants}:\n{get_pkg_levels_str(package_levels)}")
|
||||
i = 0
|
||||
total_levels = len(package_levels)
|
||||
package_bar = get_levels_bar(
|
||||
total=sum([len(lev) for lev in package_levels]),
|
||||
desc=f"Checking pkgs ({arch})",
|
||||
unit='pkgs',
|
||||
fields={"levels_total": total_levels},
|
||||
enable_rate=False,
|
||||
)
|
||||
counter_built = package_bar.add_subcounter('green')
|
||||
counter_unbuilt = package_bar.add_subcounter('blue')
|
||||
for level_num, level_packages in enumerate(package_levels):
|
||||
level_num = level_num + 1
|
||||
package_bar.update(0, name=" " * BAR_PADDING, level=level_num)
|
||||
level = set[Pkgbuild]()
|
||||
if not level_packages:
|
||||
continue
|
||||
|
||||
def add_to_level(pkg, level, reason=''):
|
||||
if reason:
|
||||
reason = f': {reason}'
|
||||
counter_unbuilt.update(force=True)
|
||||
logging.info(f"Level {level}/{total_levels} ({arch}): Adding {package.path}{reason}")
|
||||
level.add(pkg)
|
||||
build_names.update(pkg.names())
|
||||
|
||||
for package in level_packages:
|
||||
package_bar.update(0, force=True, name=ellipsize(package.name, padding=" ", length=BAR_PADDING))
|
||||
if (force and package in packages):
|
||||
add_to_level(package, level, 'query match and force=True')
|
||||
elif rebuild_dependants and package in dependants:
|
||||
add_to_level(package, level, 'package is a dependant, dependant-rebuilds requested')
|
||||
elif not check_package_version_built(package, arch, try_download=try_download, refresh_sources=refresh_sources):
|
||||
add_to_level(package, level, 'package unbuilt')
|
||||
else:
|
||||
logging.info(f"Level {level_num}/{total_levels} ({arch}): {package.path}: Package doesn't need [re]building")
|
||||
counter_built.update(force=True)
|
||||
|
||||
logging.debug(f'Finished checking level {level_num}/{total_levels} ({arch}). Adding unbuilt pkgs: {get_pkg_names_str(level)}')
|
||||
if level:
|
||||
build_levels.append(level)
|
||||
i += 1
|
||||
package_bar.close(clear=True)
|
||||
return build_levels
|
||||
|
||||
|
||||
def build_packages(
|
||||
packages: Iterable[Pkgbuild],
|
||||
arch: Arch,
|
||||
repo: Optional[dict[str, Pkgbuild]] = None,
|
||||
force: bool = False,
|
||||
rebuild_dependants: bool = False,
|
||||
try_download: bool = False,
|
||||
enable_crosscompile: bool = True,
|
||||
enable_crossdirect: bool = True,
|
||||
enable_ccache: bool = True,
|
||||
clean_chroot: bool = False,
|
||||
):
|
||||
check_programs_wrap(['makepkg', 'pacman', 'pacstrap'])
|
||||
init_prebuilts(arch)
|
||||
build_levels = get_unbuilt_package_levels(
|
||||
packages,
|
||||
arch,
|
||||
repo=repo,
|
||||
force=force,
|
||||
rebuild_dependants=rebuild_dependants,
|
||||
try_download=try_download,
|
||||
)
|
||||
|
||||
if not build_levels:
|
||||
logging.info('Everything built already')
|
||||
return
|
||||
|
||||
logging.info(f"Build plan made:\n{get_pkg_levels_str(build_levels)}")
|
||||
|
||||
total_levels = len(build_levels)
|
||||
package_bar = get_levels_bar(
|
||||
desc=f'Building pkgs ({arch})',
|
||||
color='purple',
|
||||
unit='pkgs',
|
||||
total=sum([len(lev) for lev in build_levels]),
|
||||
fields={"levels_total": total_levels},
|
||||
enable_rate=False,
|
||||
)
|
||||
files = []
|
||||
updated_repos: set[str] = set()
|
||||
package_bar.update(-1)
|
||||
for level, need_build in enumerate(build_levels):
|
||||
level = level + 1
|
||||
package_bar.update(incr=0, force=True, name=" " * BAR_PADDING, level=level)
|
||||
logging.info(f"(Level {level}/{total_levels}) Building {get_pkg_names_str(need_build)}")
|
||||
for package in need_build:
|
||||
package_bar.update(force=True, name=ellipsize(package.name, padding=" ", length=BAR_PADDING))
|
||||
base = package.pkgbase if isinstance(package, SubPkgbuild) else package
|
||||
assert isinstance(base, Pkgbase)
|
||||
if package.is_built(arch):
|
||||
logging.info(f"Skipping building {package.name} since it was already built this run as part of pkgbase {base.name}")
|
||||
continue
|
||||
build_package(
|
||||
package,
|
||||
arch=arch,
|
||||
enable_crosscompile=enable_crosscompile,
|
||||
enable_crossdirect=enable_crossdirect,
|
||||
enable_ccache=enable_ccache,
|
||||
clean_chroot=clean_chroot,
|
||||
repo=repo,
|
||||
)
|
||||
files += add_package_to_repo(package, arch)
|
||||
updated_repos.add(package.repo)
|
||||
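# Remember which arches this pkgbase now covers ('any' and/or the target arch) so that
# sibling subpackages are skipped via is_built() later in this run.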
for _arch in ['any', arch]:
|
||||
if _arch in base.arches:
|
||||
base._built_for.add(_arch)
|
||||
package_bar.update()
|
||||
# rescan affected repos
|
||||
local_repos = get_kupfer_local(arch, in_chroot=False, scan=False)
|
||||
for repo_name in updated_repos:
|
||||
assert repo_name in local_repos.repos
|
||||
local_repos.repos[repo_name].scan()
|
||||
|
||||
package_bar.close(clear=True)
|
||||
return files
|
||||
|
||||
|
||||
def build_packages_by_paths(
|
||||
paths: Iterable[str],
|
||||
arch: Arch,
|
||||
repo: Optional[dict[str, Pkgbuild]] = None,
|
||||
force=False,
|
||||
rebuild_dependants: bool = False,
|
||||
try_download: bool = False,
|
||||
enable_crosscompile: bool = True,
|
||||
enable_crossdirect: bool = True,
|
||||
enable_ccache: bool = True,
|
||||
clean_chroot: bool = False,
|
||||
):
|
||||
if isinstance(paths, str):
|
||||
paths = [paths]
|
||||
|
||||
check_programs_wrap(['makepkg', 'pacman', 'pacstrap'])
|
||||
assert config.runtime.arch
|
||||
for _arch in set([arch, config.runtime.arch]):
|
||||
init_prebuilts(_arch)
|
||||
packages = filter_pkgbuilds(paths, arch=arch, repo=repo, allow_empty_results=False)
|
||||
return build_packages(
|
||||
packages,
|
||||
arch,
|
||||
repo=repo,
|
||||
force=force,
|
||||
rebuild_dependants=rebuild_dependants,
|
||||
try_download=try_download,
|
||||
enable_crosscompile=enable_crosscompile,
|
||||
enable_crossdirect=enable_crossdirect,
|
||||
enable_ccache=enable_ccache,
|
||||
clean_chroot=clean_chroot,
|
||||
)
|
||||
|
||||
|
||||
_qemu_enabled: dict[Arch, bool] = {arch: False for arch in ARCHES}
|
||||
|
||||
|
||||
def build_enable_qemu_binfmt(arch: Arch, repo: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, native_chroot: Optional[BuildChroot] = None):
|
||||
"""
|
||||
Build and enable qemu-user-static, binfmt and crossdirect
|
||||
Specify lazy=False to force building the packages.
|
||||
"""
|
||||
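# Rough flow: build or download qemu-user/binfmt/crossdirect (plus a matching cross-gcc
# from the PKGBUILD repo, if available) for the native arch, install them with
# `pacman -U`, then register the binfmt handler for the requested arch.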
if arch not in ARCHES:
|
||||
raise Exception(f'Unknown binfmt architecture "{arch}". Choices: {", ".join(ARCHES)}')
|
||||
if _qemu_enabled[arch] or (lazy and binfmt_is_registered(arch)):
|
||||
if not _qemu_enabled[arch]:
|
||||
logging.info(f"qemu binfmt for {arch} was already enabled!")
|
||||
return
|
||||
native = config.runtime.arch
|
||||
assert native
|
||||
if arch == native:
|
||||
_qemu_enabled[arch] = True
|
||||
logging.warning("Not enabling binfmt for host architecture!")
|
||||
return
|
||||
logging.info('Installing qemu-user (building if necessary)')
|
||||
check_programs_wrap(['pacman', 'makepkg', 'pacstrap'])
|
||||
# build qemu-user, binfmt, crossdirect
|
||||
packages = list(CROSSDIRECT_PKGS)
|
||||
hostspec = GCC_HOSTSPECS[arch][arch]
|
||||
cross_gcc = f"{hostspec}-gcc"
|
||||
if repo:
|
||||
for pkg in repo.values():
|
||||
if (pkg.name == cross_gcc or cross_gcc in pkg.provides):
|
||||
if config.runtime.arch not in pkg.arches:
|
||||
logging.debug(f"Package {pkg.path} matches {cross_gcc=} name but not arch: {pkg.arches=}")
|
||||
continue
|
||||
packages.append(pkg.path)
|
||||
logging.debug(f"Adding gcc package {pkg.path} to the necessary crosscompilation tools")
|
||||
break
|
||||
build_packages_by_paths(
|
||||
packages,
|
||||
native,
|
||||
repo=repo,
|
||||
try_download=True,
|
||||
enable_crosscompile=False,
|
||||
enable_crossdirect=False,
|
||||
enable_ccache=False,
|
||||
)
|
||||
crossrepo = get_kupfer_local(native, in_chroot=False, scan=True).repos['cross'].packages
|
||||
pkgfiles = [os.path.join(crossrepo[pkg].resolved_url.split('file://')[1]) for pkg in QEMU_BINFMT_PKGS] # type: ignore
|
||||
runcmd = run_root_cmd
|
||||
if native_chroot or not is_wrapped():
|
||||
native_chroot = native_chroot or setup_build_chroot(native)
|
||||
runcmd = native_chroot.run_cmd
|
||||
hostdir = config.get_path('packages')
|
||||
_files = []
|
||||
# convert host paths to in-chroot paths
|
||||
for p in pkgfiles:
|
||||
assert p.startswith(hostdir)
|
||||
_files.append(os.path.join(CHROOT_PATHS['packages'], p[len(hostdir):].lstrip('/')))
|
||||
pkgfiles = _files
|
||||
runcmd(['pacman', '-U', '--noconfirm', '--needed'] + pkgfiles, stderr=sys.stdout)
|
||||
binfmt_register(arch, chroot=native_chroot)
|
||||
_qemu_enabled[arch] = True
|
||||
496 packages/cli.py
@@ -1,496 +0,0 @@
|
||||
import click
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from glob import glob
|
||||
from typing import Iterable, Optional
|
||||
|
||||
from config.state import config
|
||||
from constants import Arch, ARCHES, SRCINFO_FILE, SRCINFO_INITIALISED_FILE, SRCINFO_METADATA_FILE, SRCINFO_TARBALL_FILE, SRCINFO_TARBALL_URL
|
||||
from exec.cmd import run_cmd, shell_quote, CompletedProcess
|
||||
from exec.file import get_temp_dir, makedir, remove_file
|
||||
from devices.device import get_profile_device
|
||||
from distro.distro import get_kupfer_local, get_kupfer_url, get_kupfer_repo_names
|
||||
from distro.package import LocalPackage
|
||||
from net.ssh import run_ssh_command, scp_put_files
|
||||
from utils import download_file, git, sha256sum
|
||||
from wrapper import check_programs_wrap, enforce_wrap
|
||||
|
||||
from .build import build_packages_by_paths, init_prebuilts
|
||||
from .pkgbuild import discover_pkgbuilds, filter_pkgbuilds, get_pkgbuild_dirs, init_pkgbuilds
|
||||
|
||||
SRCINFO_CACHE_FILES = [SRCINFO_FILE, SRCINFO_INITIALISED_FILE, SRCINFO_METADATA_FILE]
|
||||
|
||||
|
||||
def build(
|
||||
paths: Iterable[str],
|
||||
force: bool,
|
||||
arch: Optional[Arch] = None,
|
||||
rebuild_dependants: bool = False,
|
||||
try_download: bool = False,
|
||||
):
|
||||
config.enforce_config_loaded()
|
||||
enforce_wrap()
|
||||
arch = arch or get_profile_device(hint_or_set_arch=True).arch
|
||||
|
||||
if arch not in ARCHES:
|
||||
raise Exception(f'Unknown architecture "{arch}". Choices: {", ".join(ARCHES)}')
|
||||
|
||||
return build_packages_by_paths(
|
||||
paths,
|
||||
arch,
|
||||
force=force,
|
||||
rebuild_dependants=rebuild_dependants,
|
||||
try_download=try_download,
|
||||
enable_crosscompile=config.file.build.crosscompile,
|
||||
enable_crossdirect=config.file.build.crossdirect,
|
||||
enable_ccache=config.file.build.ccache,
|
||||
clean_chroot=config.file.build.clean_mode,
|
||||
)
|
||||
|
||||
|
||||
def init_pkgbuild_caches(clean_src_dirs: bool = True, remote_branch: Optional[str] = None):
|
||||
|
||||
def read_srcinitialised_checksum(src_initialised):
|
||||
with open(src_initialised) as fd:
|
||||
d = json.load(fd)
|
||||
if isinstance(d, dict):
|
||||
return d.get('PKGBUILD', '!!!ERROR!!!')
|
||||
raise Exception("JSON content not a dictionary!")
|
||||
|
||||
# get_kupfer_url() resolves repo branch variable in url
|
||||
url = get_kupfer_url(url=SRCINFO_TARBALL_URL, branch=remote_branch)
|
||||
cachetar = os.path.join(config.get_path('packages'), SRCINFO_TARBALL_FILE)
|
||||
makedir(os.path.dirname(cachetar))
|
||||
logging.info(f"Updating PKGBUILD caches from {url}" + (", pruning outdated src/ directories" if clean_src_dirs else ""))
|
||||
updated = download_file(cachetar, url)
|
||||
logging.info("Cache tarball was " + ('downloaded successfully' if updated else 'already up to date'))
|
||||
tmpdir = get_temp_dir()
|
||||
logging.debug(f"Extracting {cachetar} to {tmpdir}")
|
||||
res = run_cmd(['tar', 'xf', cachetar], cwd=tmpdir)
|
||||
assert isinstance(res, CompletedProcess)
|
||||
if res.returncode:
|
||||
raise Exception(f"failed to extract srcinfo cache archive '{cachetar}'")
|
||||
pkgbuild_dirs = get_pkgbuild_dirs()
|
||||
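# Per PKGBUILD dir: copy the cached SRCINFO files from the downloaded tarball only when
# the tarball's recorded PKGBUILD checksum matches the local PKGBUILD; outdated src/
# dirs are pruned (if requested) and non-matching packages are skipped.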
for pkg in pkgbuild_dirs:
|
||||
logging.info(f"{pkg}: analyzing cache")
|
||||
pkgdir = os.path.join(config.get_path('pkgbuilds'), pkg)
|
||||
srcdir = os.path.join(pkgdir, 'src')
|
||||
src_initialised = os.path.join(pkgdir, SRCINFO_INITIALISED_FILE)
|
||||
cachedir = os.path.join(tmpdir, pkg)
|
||||
pkgbuild_checksum = sha256sum(os.path.join(pkgdir, 'PKGBUILD'))
|
||||
copy_files: set[str] = set(SRCINFO_CACHE_FILES)
|
||||
if os.path.exists(src_initialised):
|
||||
try:
|
||||
if read_srcinitialised_checksum(src_initialised) == pkgbuild_checksum:
|
||||
copy_files.remove(SRCINFO_INITIALISED_FILE)
|
||||
for f in copy_files.copy():
|
||||
fpath = os.path.join(pkgdir, f)
|
||||
if os.path.exists(fpath):
|
||||
copy_files.remove(f)
|
||||
if not copy_files:
|
||||
logging.info(f"{pkg}: SRCINFO cache already up to date")
|
||||
continue
|
||||
except Exception as ex:
|
||||
logging.warning(f"{pkg}: Something went wrong parsing {SRCINFO_INITIALISED_FILE}, treating as outdated!:\n{ex}")
|
||||
if clean_src_dirs and os.path.exists(srcdir):
|
||||
logging.info(f"{pkg}: outdated src/ detected, removing")
|
||||
remove_file(srcdir, recursive=True)
|
||||
remove_file(src_initialised)
|
||||
if not os.path.exists(cachedir):
|
||||
logging.info(f"{pkg}: not found in remote repo cache, skipping")
|
||||
continue
|
||||
cache_initialised = os.path.join(cachedir, SRCINFO_INITIALISED_FILE)
|
||||
try:
|
||||
if read_srcinitialised_checksum(cache_initialised) != pkgbuild_checksum:
|
||||
logging.info(f"{pkg}: PKGBUILD checksum differs from remote repo cache, skipping")
|
||||
continue
|
||||
except Exception as ex:
|
||||
logging.warning(f"{pkg}: Failed to parse the remote repo's cached {SRCINFO_INITIALISED_FILE}, skipping!:\n{ex}")
|
||||
continue
|
||||
if not copy_files:
|
||||
continue
|
||||
logging.info(f"{pkg}: Copying srcinfo cache from remote repo")
|
||||
logging.debug(f'{pkg}: copying {copy_files}')
|
||||
copy_files_list = [shell_quote(os.path.join(cachedir, f)) for f in copy_files]
|
||||
res = run_cmd(f"cp {' '.join(copy_files_list)} {shell_quote(pkgdir)}/")
|
||||
assert isinstance(res, CompletedProcess)
|
||||
if res.returncode:
|
||||
raise Exception(f"{pkg}: failed to copy cache contents from {cachedir}")
|
||||
|
||||
|
||||
non_interactive_flag = click.option('--non-interactive', is_flag=True)
|
||||
init_caches_flag = click.option(
|
||||
'--init-caches/--no-init-caches',
|
||||
is_flag=True,
|
||||
default=True,
|
||||
show_default=True,
|
||||
help="Fill PKGBUILDs caches from HTTPS repo where checksums match",
|
||||
)
|
||||
remove_outdated_src_flag = click.option(
|
||||
'--clean-src-dirs/--no-clean-src-dirs',
|
||||
is_flag=True,
|
||||
default=True,
|
||||
show_default=True,
|
||||
help="Remove outdated src/ directories to avoid problems",
|
||||
)
|
||||
switch_branch_flag = click.option('--switch-branch', is_flag=True, help="Force the branch to be corrected even in non-interactive mode")
|
||||
discard_changes_flag = click.option('--discard-changes', is_flag=True, help="When switching branches, discard any locally changed conflicting files")
|
||||
|
||||
|
||||
@click.group(name='packages')
|
||||
def cmd_packages():
|
||||
"""Build and manage packages and PKGBUILDs"""
|
||||
|
||||
|
||||
@cmd_packages.command(name='update')
|
||||
@non_interactive_flag
|
||||
@init_caches_flag
|
||||
@switch_branch_flag
|
||||
@discard_changes_flag
|
||||
@remove_outdated_src_flag
|
||||
def cmd_update(
|
||||
non_interactive: bool = False,
|
||||
init_caches: bool = False,
|
||||
clean_src_dirs: bool = True,
|
||||
switch_branch: bool = False,
|
||||
discard_changes: bool = False,
|
||||
):
|
||||
"""Update PKGBUILDs git repo"""
|
||||
enforce_wrap()
|
||||
init_pkgbuilds(interactive=not non_interactive, lazy=False, update=True, switch_branch=switch_branch, discard_changes=discard_changes)
|
||||
if init_caches:
|
||||
init_pkgbuild_caches(clean_src_dirs=clean_src_dirs)
|
||||
logging.info("Refreshing outdated SRCINFO caches")
|
||||
discover_pkgbuilds(lazy=False)
|
||||
|
||||
|
||||
@cmd_packages.command(name='init')
|
||||
@non_interactive_flag
|
||||
@init_caches_flag
|
||||
@switch_branch_flag
|
||||
@discard_changes_flag
|
||||
@remove_outdated_src_flag
|
||||
@click.option('-u', '--update', is_flag=True, help='Use git pull to update the PKGBUILDs')
|
||||
def cmd_init(
|
||||
non_interactive: bool = False,
|
||||
init_caches: bool = True,
|
||||
clean_src_dirs: bool = True,
|
||||
switch_branch: bool = False,
|
||||
discard_changes: bool = False,
|
||||
update: bool = False,
|
||||
):
|
||||
"Ensure PKGBUILDs git repo is checked out locally"
|
||||
init_pkgbuilds(interactive=not non_interactive, lazy=False, update=update, switch_branch=switch_branch, discard_changes=discard_changes)
|
||||
if init_caches:
|
||||
init_pkgbuild_caches(clean_src_dirs=clean_src_dirs)
|
||||
for arch in ARCHES:
|
||||
init_prebuilts(arch)
|
||||
|
||||
|
||||
@cmd_packages.command(name='build')
|
||||
@click.option('--force', is_flag=True, default=False, help='Rebuild even if package is already built')
|
||||
@click.option('--arch', default=None, required=False, type=click.Choice(ARCHES), help="The CPU architecture to build for")
|
||||
@click.option('--rebuild-dependants', is_flag=True, default=False, help='Rebuild packages that depend on packages that will be [re]built')
|
||||
@click.option('--no-download', is_flag=True, default=False, help="Don't try downloading packages from online repos before building")
|
||||
@click.argument('paths', nargs=-1)
|
||||
def cmd_build(paths: list[str], force=False, arch: Optional[Arch] = None, rebuild_dependants: bool = False, no_download: bool = False):
|
||||
"""
|
||||
Build packages (and dependencies) by paths as required.
|
||||
|
||||
The paths are specified relative to the PKGBUILDs dir, e.g. "cross/crossdirect".
|
||||
|
||||
Multiple paths may be specified as separate arguments.
|
||||
|
||||
Unless --no-download is passed, packages that aren't already built will be downloaded
from the HTTPS repos, provided an exact version match exists on the server.
|
||||
"""
|
||||
build(paths, force, arch=arch, rebuild_dependants=rebuild_dependants, try_download=not no_download)
|
||||
|
||||
|
||||
@cmd_packages.command(name='sideload')
|
||||
@click.argument('paths', nargs=-1)
|
||||
@click.option('--arch', default=None, required=False, type=click.Choice(ARCHES), help="The CPU architecture to build for")
|
||||
@click.option('-B', '--no-build', is_flag=True, default=False, help="Don't try to build packages, just copy and install")
|
||||
def cmd_sideload(paths: Iterable[str], arch: Optional[Arch] = None, no_build: bool = False):
|
||||
"""Build packages, copy to the device via SSH and install them"""
|
||||
if not paths:
|
||||
raise Exception("No packages specified")
|
||||
arch = arch or get_profile_device(hint_or_set_arch=True).arch
|
||||
if not no_build:
|
||||
build(paths, False, arch=arch, try_download=True)
|
||||
repo: dict[str, LocalPackage] = get_kupfer_local(arch=arch, scan=True, in_chroot=False).get_packages()
|
||||
files = [pkg.resolved_url.split('file://')[1] for pkg in repo.values() if pkg.resolved_url and pkg.name in paths]
|
||||
logging.debug(f"Sideload: Found package files: {files}")
|
||||
if not files:
|
||||
logging.fatal("No packages matched")
|
||||
return
|
||||
scp_put_files(files, '/tmp').check_returncode()
|
||||
run_ssh_command(
|
||||
[
|
||||
'sudo',
|
||||
'pacman',
|
||||
'-U',
|
||||
*[os.path.join('/tmp', os.path.basename(file)) for file in files],
|
||||
'--noconfirm',
|
||||
"'--overwrite=\\*'",
|
||||
],
|
||||
alloc_tty=True,
|
||||
).check_returncode()
|
||||
|
||||
|
||||
CLEAN_LOCATIONS = ['src', 'pkg', *SRCINFO_CACHE_FILES]
|
||||
|
||||
|
||||
@cmd_packages.command(name='clean')
|
||||
@click.option('-f', '--force', is_flag=True, default=False, help="Don't prompt for confirmation")
|
||||
@click.option('-n', '--noop', is_flag=True, default=False, help="Print what would be removed but don't execute")
|
||||
@click.argument('what', type=click.Choice(['all', 'git', *CLEAN_LOCATIONS]), nargs=-1)
|
||||
def cmd_clean(what: Iterable[str] = ['all'], force: bool = False, noop: bool = False):
|
||||
"""
|
||||
Clean temporary files from PKGBUILDs
|
||||
|
||||
Specifying no location defaults to the special value 'all', meaning all regular locations.
|
||||
|
||||
There is also the special value 'git' which uses git to clean everything.
|
||||
Be careful with it, as it means re-downloading sources for your packages.
|
||||
"""
|
||||
if noop:
|
||||
logging.debug('Running in noop mode!')
|
||||
if force:
|
||||
logging.debug('Running in FORCE mode!')
|
||||
what = what or ['all']
|
||||
logging.debug(f'Clearing {what} from PKGBUILDs')
|
||||
pkgbuilds = config.get_path('pkgbuilds')
|
||||
if 'git' in what:
|
||||
check_programs_wrap(['git'])
|
||||
warning = "Really reset PKGBUILDs to git state completely?\nThis will erase any untracked changes to your PKGBUILDs directory."
|
||||
if not (noop or force or click.confirm(warning)):
|
||||
return
|
||||
result = git(
|
||||
[
|
||||
'clean',
|
||||
'-dffX' + ('n' if noop else ''),
|
||||
] + get_kupfer_repo_names(local=True),
|
||||
dir=pkgbuilds,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
logging.fatal('Failed to git clean')
|
||||
exit(1)
|
||||
else:
|
||||
if 'all' in what:
|
||||
what = CLEAN_LOCATIONS
|
||||
what = set(what)
|
||||
dirs = []
|
||||
for loc in CLEAN_LOCATIONS:
|
||||
if loc in what:
|
||||
logging.info(f'gathering {loc} instances')
|
||||
dirs += glob(os.path.join(pkgbuilds, '*', '*', loc))
|
||||
|
||||
dir_lines = '\n'.join(dirs)
|
||||
verb = 'Would remove' if noop else 'Removing'
|
||||
logging.info(verb + ':\n' + dir_lines)
|
||||
|
||||
if not (noop or force):
|
||||
if not click.confirm("Really remove all of these?", default=True):
|
||||
return
|
||||
|
||||
if not noop:
|
||||
for dir in dirs:
|
||||
remove_file(dir, recursive=True)
|
||||
|
||||
|
||||
@cmd_packages.command(name='list')
|
||||
def cmd_list():
|
||||
"List information about available source packages (PKGBUILDs)"
|
||||
pkgdir = os.path.join(config.get_path('pkgbuilds'), get_kupfer_repo_names(local=False)[0])
|
||||
if not os.path.exists(pkgdir):
|
||||
raise Exception(f"PKGBUILDs seem not to be initialised yet: {pkgdir} doesn't exist!\n"
|
||||
f"Try running `kupferbootstrap packages init` first!")
|
||||
check_programs_wrap(['makepkg', 'pacman'])
|
||||
packages = discover_pkgbuilds()
|
||||
logging.info(f'Done! {len(packages)} Pkgbuilds:')
|
||||
for name in sorted(packages.keys()):
|
||||
p = packages[name]
|
||||
print(f'name: {p.name}; ver: {p.version}; mode: {p.mode}; crossdirect: {p.crossdirect}; provides: {p.provides}; replaces: {p.replaces}; '
|
||||
f'local_depends: {p.local_depends}; depends: {p.depends}')
|
||||
|
||||
|
||||
@cmd_packages.command(name='check')
|
||||
@click.argument('paths', nargs=-1)
|
||||
def cmd_check(paths):
|
||||
"""Check that specified PKGBUILDs are formatted correctly"""
|
||||
config.enforce_config_loaded()
|
||||
check_programs_wrap(['makepkg'])
|
||||
|
||||
def check_quoteworthy(s: str) -> bool:
|
||||
quoteworthy = ['"', "'", "$", " ", ";", "&", "<", ">", "*", "?"]
|
||||
for symbol in quoteworthy:
|
||||
if symbol in s:
|
||||
return True
|
||||
return False
|
||||
|
||||
paths = list(paths) or ['all']
|
||||
packages = filter_pkgbuilds(paths, allow_empty_results=False)
|
||||
|
||||
for package in packages:
|
||||
name = package.name
|
||||
|
||||
is_git_package = False
|
||||
if name.endswith('-git'):
|
||||
is_git_package = True
|
||||
|
||||
required_arches = ''
|
||||
provided_arches: list[str] = []
|
||||
|
||||
mode_key = '_mode'
|
||||
nodeps_key = '_nodeps'
|
||||
crossdirect_key = '_crossdirect'
|
||||
pkgbase_key = 'pkgbase'
|
||||
pkgname_key = 'pkgname'
|
||||
arches_key = '_arches'
|
||||
arch_key = 'arch'
|
||||
commit_key = '_commit'
|
||||
source_key = 'source'
|
||||
sha256sums_key = 'sha256sums'
|
||||
required = {
|
||||
mode_key: True,
|
||||
nodeps_key: False,
|
||||
crossdirect_key: False,
|
||||
pkgbase_key: False,
|
||||
pkgname_key: True,
|
||||
'pkgdesc': False,
|
||||
'pkgver': True,
|
||||
'pkgrel': True,
|
||||
arches_key: True,
|
||||
arch_key: True,
|
||||
'license': True,
|
||||
'url': False,
|
||||
'provides': is_git_package,
|
||||
'conflicts': False,
|
||||
'replaces': False,
|
||||
'depends': False,
|
||||
'optdepends': False,
|
||||
'makedepends': False,
|
||||
'backup': False,
|
||||
'install': False,
|
||||
'options': False,
|
||||
commit_key: is_git_package,
|
||||
source_key: False,
|
||||
sha256sums_key: False,
|
||||
'noextract': False,
|
||||
}
|
||||
pkgbuild_path = os.path.join(config.get_path('pkgbuilds'), package.path, 'PKGBUILD')
|
||||
with open(pkgbuild_path, 'r') as file:
|
||||
content = file.read()
|
||||
if '\t' in content:
|
||||
logging.fatal(f'\\t is not allowed in {pkgbuild_path}')
|
||||
exit(1)
|
||||
lines = content.split('\n')
|
||||
if len(lines) == 0:
|
||||
logging.fatal(f'Empty {pkgbuild_path}')
|
||||
exit(1)
|
||||
line_index = 0
|
||||
key_index = 0
|
||||
hold_key = False
|
||||
key = ""
|
||||
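# Line-by-line linter: walk the PKGBUILD and the ordered `required` key list in lockstep;
# hold_key keeps the current key active while inside a multi-line `=( ... )` array.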
while True:
|
||||
line = lines[line_index]
|
||||
|
||||
if line.startswith('#'):
|
||||
line_index += 1
|
||||
continue
|
||||
|
||||
if line.startswith('_') and line.split('=', 1)[0] not in [mode_key, nodeps_key, arches_key, commit_key]:
|
||||
line_index += 1
|
||||
continue
|
||||
|
||||
formatted = True
|
||||
next_key = False
|
||||
next_line = False
|
||||
reason = ""
|
||||
|
||||
if hold_key:
|
||||
next_line = True
|
||||
else:
|
||||
if key_index < len(required):
|
||||
key = list(required)[key_index]
|
||||
if line.startswith(key):
|
||||
if key == pkgbase_key:
|
||||
required[pkgname_key] = False
|
||||
if key == source_key:
|
||||
required[sha256sums_key] = True
|
||||
next_key = True
|
||||
next_line = True
|
||||
elif key in required and not required[key]:
|
||||
next_key = True
|
||||
|
||||
if line == ')':
|
||||
hold_key = False
|
||||
next_key = True
|
||||
|
||||
if key == arches_key:
|
||||
required_arches = line.split('=')[1]
|
||||
|
||||
if line.endswith('=('):
|
||||
hold_key = True
|
||||
|
||||
if line.startswith(' ') or line == ')':
|
||||
next_line = True
|
||||
|
||||
if line.startswith(' ') and not line.startswith(' '):
|
||||
formatted = False
|
||||
reason = 'Multiline variables should be indented with 4 spaces'
|
||||
|
||||
if '"' in line and not check_quoteworthy(line):
|
||||
formatted = False
|
||||
reason = 'Found literal " although no special character was found in the line to justify the usage of a literal "'
|
||||
|
||||
if "'" in line and '"' not in line:
|
||||
formatted = False
|
||||
reason = 'Found literal \' although either a literal " or no quotes should be used'
|
||||
|
||||
if ('=(' in line and ' ' in line and '"' not in line and not line.endswith('=(')) or (hold_key and line.endswith(')')):
|
||||
formatted = False
|
||||
reason = 'Multiple elements in a list need to be in separate lines'
|
||||
|
||||
if formatted and not next_key and not next_line:
|
||||
if key_index == len(required):
|
||||
if lines[line_index] == '':
|
||||
break
|
||||
else:
|
||||
formatted = False
|
||||
reason = 'Expected final empty line after all variables'
|
||||
else:
|
||||
formatted = False
|
||||
reason = f'Expected to find "{key}"'
|
||||
|
||||
if not formatted:
|
||||
logging.fatal(f'Formatting error in {pkgbuild_path}: Line {line_index+1}: "{line}"')
|
||||
if reason != "":
|
||||
logging.fatal(reason)
|
||||
exit(1)
|
||||
|
||||
if key == arch_key:
|
||||
if line.endswith(')'):
|
||||
if line.startswith(f'{arch_key}=('):
|
||||
check_arches_hint(pkgbuild_path, required_arches, [line[6:-1]])
|
||||
else:
|
||||
check_arches_hint(pkgbuild_path, required_arches, provided_arches)
|
||||
elif line.startswith(' '):
|
||||
provided_arches.append(line[4:])
|
||||
|
||||
if next_key and not hold_key:
|
||||
key_index += 1
|
||||
if next_line:
|
||||
line_index += 1
|
||||
|
||||
logging.info(f'{package.path} nicely formatted!')
|
||||
|
||||
|
||||
def check_arches_hint(path: str, required: str, provided: list[str]):
|
||||
if required == 'all':
|
||||
for arch in ARCHES:
|
||||
if arch not in provided:
|
||||
logging.warning(f'Missing {arch} in arches list in {path}, because _arches hint is `all`')
|
||||
117 packages/device.py (new file)
@@ -0,0 +1,117 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from config import config
|
||||
from constants import Arch, ARCHES
|
||||
from config.scheme import DataClass, munchclass
|
||||
from .pkgbuild import discover_pkgbuilds, get_pkgbuild_by_path, _pkgbuilds_cache, Pkgbuild
|
||||
|
||||
DEVICE_DEPRECATIONS = {
|
||||
"oneplus-enchilada": "sdm845-oneplus-enchilada",
|
||||
"oneplus-fajita": "sdm845-oneplus-fajita",
|
||||
"xiaomi-beryllium-ebbg": "sdm845-sdm845-xiaomi-beryllium-ebbg",
|
||||
"xiaomi-beryllium-tianma": "sdm845-sdm845-xiaomi-tianma",
|
||||
"bq-paella": "msm8916-bq-paella",
|
||||
}
|
||||
|
||||
|
||||
@munchclass()
|
||||
class Device(DataClass):
|
||||
name: str
|
||||
arch: Arch
|
||||
package: Pkgbuild
|
||||
|
||||
def parse_deviceinfo(self):
|
||||
pass
|
||||
|
||||
|
||||
def check_devicepkg_name(name: str, log_level: Optional[int] = None):
|
||||
valid = True
|
||||
if not name.startswith('device-'):
|
||||
valid = False
|
||||
if log_level is not None:
|
||||
logging.log(log_level, f'invalid device package name "{name}": doesn\'t start with "device-"')
|
||||
if name.endswith('-common'):
|
||||
valid = False
|
||||
if log_level is not None:
|
||||
logging.log(log_level, f'invalid device package name "{name}": ends with "-common"')
|
||||
return valid
|
||||
|
||||
|
||||
def parse_device_pkg(pkgbuild: Pkgbuild) -> Device:
|
||||
if len(pkgbuild.arches) != 1:
|
||||
raise Exception(f"{pkgbuild.name}: Device package must have exactly one arch, but has {pkgbuild.arches}")
|
||||
arch = pkgbuild.arches[0]
|
||||
if arch == 'any' or arch not in ARCHES:
|
||||
raise Exception(f'unknown arch for device package: {arch}')
|
||||
if pkgbuild.repo != 'device':
|
||||
logging.warning(f'device package {pkgbuild.name} is in unexpected repo "{pkgbuild.repo}", expected "device"')
|
||||
name = pkgbuild.name
|
||||
prefix = 'device-'
|
||||
if name.startswith(prefix):
|
||||
name = name[len(prefix):]
|
||||
return Device(name=name, arch=arch, package=pkgbuild)
|
||||
|
||||
|
||||
_device_cache: dict[str, Device] = {}
|
||||
_device_cache_populated: bool = False
|
||||
|
||||
|
||||
def get_devices(pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True) -> dict[str, Device]:
|
||||
global _device_cache, _device_cache_populated
|
||||
use_cache = _device_cache_populated and lazy
|
||||
if not use_cache:
|
||||
if not pkgbuilds:
|
||||
pkgbuilds = discover_pkgbuilds(lazy=lazy)
|
||||
_device_cache.clear()
|
||||
for pkgbuild in pkgbuilds.values():
|
||||
if not (pkgbuild.repo == 'device' and check_devicepkg_name(pkgbuild.name, log_level=None)):
|
||||
continue
|
||||
dev = parse_device_pkg(pkgbuild)
|
||||
_device_cache[dev.name] = dev
|
||||
_device_cache_populated = True
|
||||
return _device_cache.copy()
|
||||
|
||||
|
||||
def get_device(name: str, pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, scan_all=False) -> Device:
|
||||
global _device_cache, _device_cache_populated
|
||||
assert lazy or pkgbuilds
|
||||
if name in DEVICE_DEPRECATIONS:
|
||||
warning = f"Deprecated device {name}"
|
||||
replacement = DEVICE_DEPRECATIONS[name]
|
||||
if replacement:
|
||||
warning += (f': Device has been renamed to {replacement}! Please adjust your profile config!\n'
|
||||
'This will become an error in a future version!')
|
||||
name = replacement
|
||||
logging.warning(warning)
|
||||
if lazy and name in _device_cache:
|
||||
return _device_cache[name]
|
||||
if scan_all:
|
||||
devices = get_devices(pkgbuilds=pkgbuilds, lazy=lazy)
|
||||
if name not in devices:
|
||||
raise Exception(f'Unknown device {name}!')
|
||||
return devices[name]
|
||||
else:
|
||||
pkgname = f'device-{name}'
|
||||
if pkgbuilds:
|
||||
if pkgname not in pkgbuilds:
|
||||
raise Exception(f'Unknown device {name}!')
|
||||
pkgbuild = pkgbuilds[pkgname]
|
||||
else:
|
||||
if lazy and pkgname in _pkgbuilds_cache:
|
||||
pkgbuild = _pkgbuilds_cache[pkgname]
|
||||
else:
|
||||
relative_path = os.path.join('device', pkgname)
|
||||
assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_path))
|
||||
pkgbuild = [p for p in get_pkgbuild_by_path(relative_path, lazy=lazy, _config=config) if p.name == pkgname][0]
|
||||
device = parse_device_pkg(pkgbuild)
|
||||
if lazy:
|
||||
_device_cache[name] = device
|
||||
return device
|
||||
|
||||
|
||||
def get_profile_device(profile_name: Optional[str] = None, hint_or_set_arch: bool = False):
|
||||
profile = config.enforce_profile_device_set(profile_name, hint_or_set_arch=hint_or_set_arch)
|
||||
return get_device(profile.device)
|
||||
@@ -4,177 +4,78 @@ import click
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from joblib import Parallel, delayed
|
||||
from typing import Iterable, Optional
|
||||
from typing import Optional
|
||||
|
||||
from config.state import config, ConfigStateHolder
|
||||
from constants import Arch
|
||||
from distro.distro import get_kupfer_repo_names
|
||||
from config import config, ConfigStateHolder
|
||||
from constants import REPOSITORIES
|
||||
from exec.cmd import run_cmd
|
||||
from constants import Arch, MAKEPKG_CMD
|
||||
from distro.package import PackageInfo
|
||||
from exec.file import remove_file
|
||||
from logger import setup_logging
|
||||
from utils import git, git_get_branch
|
||||
from utils import git
|
||||
from wrapper import check_programs_wrap
|
||||
from typehelpers import TypeAlias
|
||||
|
||||
from .srcinfo_cache import SrcinfoMetaFile
|
||||
|
||||
|
||||
def clone_pkgbuilds(
|
||||
pkgbuilds_dir: str,
|
||||
repo_url: str,
|
||||
branch: str,
|
||||
interactive=False,
|
||||
update=True,
|
||||
switch_branch: bool = False,
|
||||
discard_changes: bool = False,
|
||||
):
|
||||
def clone_pkbuilds(pkgbuilds_dir: str, repo_url: str, branch: str, interactive=False, update=True):
|
||||
check_programs_wrap(['git'])
|
||||
git_dir = os.path.join(pkgbuilds_dir, '.git')
|
||||
if not os.path.exists(git_dir):
|
||||
logging.info(f'Cloning branch {branch} from {repo_url}')
|
||||
logging.info('Cloning branch {branch} from {repo}')
|
||||
result = git(['clone', '-b', branch, repo_url, pkgbuilds_dir])
|
||||
if result.returncode != 0:
|
||||
raise Exception('Error cloning pkgbuilds')
|
||||
else:
|
||||
current_branch = git_get_branch(pkgbuilds_dir)
|
||||
result = git(['--git-dir', git_dir, 'branch', '--show-current'], capture_output=True)
|
||||
current_branch = result.stdout.decode().strip()
|
||||
if current_branch != branch:
|
||||
logging.warning(f'pkgbuilds repository is on the wrong branch: {current_branch}, requested: {branch}')
|
||||
if switch_branch or (interactive and click.confirm('Would you like to switch branches?', default=False)):
|
||||
result = git(['remote', 'update'], dir=pkgbuilds_dir)
|
||||
if interactive and click.confirm('Would you like to switch branches?', default=False):
|
||||
result = git(['switch', branch], dir=pkgbuilds_dir)
|
||||
if result.returncode != 0:
|
||||
raise Exception('failed updating PKGBUILDs branches')
|
||||
result = git(['switch', *(['-f'] if discard_changes else []), branch], dir=pkgbuilds_dir)
|
||||
if result.returncode != 0:
|
||||
raise Exception('failed switching PKGBUILDs branches')
|
||||
logging.warning('Hint: you can use `kupferbootstrap packages update` to switch branches')
|
||||
|
||||
raise Exception('failed switching branches')
|
||||
if update:
|
||||
if interactive:
|
||||
if not click.confirm('Would you like to try updating the PKGBUILDs repo?', default=True):
|
||||
if not click.confirm('Would you like to try updating the PKGBUILDs repo?'):
|
||||
return
|
||||
result = git(['fetch'], dir=pkgbuilds_dir)
|
||||
result = git(['pull'], pkgbuilds_dir)
|
||||
if result.returncode != 0:
|
||||
raise Exception("Failed to fetch updates with git")
|
||||
|
||||
pull_cmd = ['pull', '--ff-only']
|
||||
result = git(pull_cmd, dir=pkgbuilds_dir)
|
||||
if result.returncode != 0:
|
||||
if discard_changes:
|
||||
logging.info("git pull failed, detecting conflicting changes")
|
||||
# '@{u}' is a git placeholder for the latest upstream commit
|
||||
result = git(['diff', '--name-only', '--diff-filter=UD', '@{u}'], capture_output=True, dir=pkgbuilds_dir)
|
||||
result.check_returncode()
|
||||
if result.stdout:
|
||||
logging.info("Discarding conflicting changes")
|
||||
for f in result.stdout.decode().split('\n'):
|
||||
path = os.path.join(pkgbuilds_dir, f)
|
||||
if not os.path.exists(path):
|
||||
continue
|
||||
result = git(['checkout', '--', f], dir=pkgbuilds_dir, capture_output=True)
|
||||
if result.returncode != 0:
|
||||
logging.debug(f'git checkout of file "{f}" failed; removing.')
|
||||
remove_file(path)
|
||||
logging.info("Retrying git pull")
|
||||
result = git(pull_cmd, dir=pkgbuilds_dir)
|
||||
if result.returncode != 0:
|
||||
logging.info("Last resort: git reset --hard")
|
||||
result = git(['reset', '--hard', '@{u}'], capture_output=True, dir=pkgbuilds_dir)
|
||||
if result.returncode == 0:
|
||||
return
|
||||
raise Exception('`git pull` failed to update pkgbuilds')
|
||||
raise Exception('failed to update pkgbuilds')
|
||||
|
||||
|
||||
_pkgbuilds_initialised: bool = False
|
||||
|
||||
|
||||
def init_pkgbuilds(
|
||||
interactive=False,
|
||||
lazy: bool = True,
|
||||
update: bool = False,
|
||||
switch_branch: bool = False,
|
||||
discard_changes: bool = False,
|
||||
):
|
||||
global _pkgbuilds_initialised
|
||||
if lazy and _pkgbuilds_initialised:
|
||||
return
|
||||
def init_pkgbuilds(interactive=False):
|
||||
pkgbuilds_dir = config.get_path('pkgbuilds')
|
||||
repo_url = config.file.pkgbuilds.git_repo
|
||||
branch = config.file.pkgbuilds.git_branch
|
||||
clone_pkgbuilds(
|
||||
pkgbuilds_dir,
|
||||
repo_url,
|
||||
branch,
|
||||
interactive=interactive,
|
||||
update=update,
|
||||
switch_branch=switch_branch,
|
||||
discard_changes=discard_changes,
|
||||
)
|
||||
_pkgbuilds_initialised = True
|
||||
|
||||
|
||||
VersionSpec: TypeAlias = Optional[str]
|
||||
VersionSpecs: TypeAlias = dict[str, Optional[list[VersionSpec]]]
|
||||
|
||||
|
||||
def parse_version_spec(spec: str) -> tuple[str, VersionSpec]:
|
||||
for op in ['<', '>', '=']:
|
||||
if op in spec:
|
||||
name, ver = spec.split(op, 1)
|
||||
assert name and ver
|
||||
ver = op + ver
|
||||
if name[-1] == '=':
|
||||
assert op != '='
|
||||
name = name[:-1]
|
||||
ver = '=' + ver
|
||||
return name, ver
|
||||
return spec.strip(), None
|
||||
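# Illustrative examples (not part of the original source):
#   parse_version_spec("python>=3.11") -> ("python", ">=3.11")
#   parse_version_spec("linux")        -> ("linux", None)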
|
||||
|
||||
def get_version_specs(spec: str, existing_specs: Optional[VersionSpecs] = None) -> VersionSpecs:
|
||||
specs = existing_specs or {}
|
||||
name, ver = parse_version_spec(spec)
|
||||
_specs = specs.get(name, None)
|
||||
if ver:
|
||||
_specs = _specs or []
|
||||
if ver not in _specs:
|
||||
_specs.append(ver)
|
||||
specs[name] = _specs
|
||||
return specs
|
||||
repo_url = config.file['pkgbuilds']['git_repo']
|
||||
branch = config.file['pkgbuilds']['git_branch']
|
||||
clone_pkbuilds(pkgbuilds_dir, repo_url, branch, interactive=interactive, update=False)
|
||||
|
||||
|
||||
class Pkgbuild(PackageInfo):
|
||||
name: str
|
||||
version: str
|
||||
arches: list[Arch]
|
||||
depends: VersionSpecs
|
||||
makedepends: VersionSpecs
|
||||
provides: VersionSpecs
|
||||
depends: list[str]
|
||||
provides: list[str]
|
||||
replaces: list[str]
|
||||
local_depends: list[str]
|
||||
repo: str
|
||||
mode: str
|
||||
nodeps: bool
|
||||
crossdirect: bool
|
||||
path: str
|
||||
pkgver: str
|
||||
pkgrel: str
|
||||
description: str
|
||||
sources_refreshed: bool
|
||||
srcinfo_cache: Optional[SrcinfoMetaFile]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
relative_path: str,
|
||||
arches: list[Arch] = [],
|
||||
depends: VersionSpecs = {},
|
||||
makedepends: VersionSpecs = {},
|
||||
provides: VersionSpecs = {},
|
||||
depends: list[str] = [],
|
||||
provides: list[str] = [],
|
||||
replaces: list[str] = [],
|
||||
repo: Optional[str] = None,
|
||||
sources_refreshed: bool = False,
|
||||
srcinfo_cache: Optional[SrcinfoMetaFile] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Create new Pkgbuild representation for file located at `{relative_path}/PKGBUILD`.
|
||||
@@ -183,32 +84,27 @@ class Pkgbuild(PackageInfo):
|
||||
self.name = os.path.basename(relative_path)
|
||||
self.version = ''
|
||||
self.arches = list(arches)
|
||||
self.depends = dict(depends)
|
||||
self.makedepends = dict(makedepends)
|
||||
self.provides = dict(provides)
|
||||
self.depends = list(depends)
|
||||
self.provides = list(provides)
|
||||
self.replaces = list(replaces)
|
||||
self.local_depends = []
|
||||
self.repo = repo or ''
|
||||
self.mode = ''
|
||||
self.nodeps = False
|
||||
self.crossdirect = True
|
||||
self.path = relative_path
|
||||
self.pkgver = ''
|
||||
self.pkgrel = ''
|
||||
self.description = ''
|
||||
self.sources_refreshed = sources_refreshed
|
||||
self.srcinfo_cache = srcinfo_cache
|
||||
|
||||
def __repr__(self):
|
||||
return ','.join([
|
||||
'Pkgbuild(' + self.name,
|
||||
repr(self.path),
|
||||
str(self.version) + ("🔄" if self.sources_refreshed else ""),
|
||||
repr(self.mode) + ')',
|
||||
self.version + ("🔄" if self.sources_refreshed else ""),
|
||||
self.mode + ')',
|
||||
])
|
||||
|
||||
def names(self) -> list[str]:
|
||||
return list({self.name, *self.provides, *self.replaces})
|
||||
def names(self):
|
||||
return list(set([self.name] + self.provides + self.replaces))
|
||||
|
||||
def update_version(self):
|
||||
"""updates `self.version` from `self.pkgver` and `self.pkgrel`"""
|
||||
@@ -217,42 +113,26 @@ class Pkgbuild(PackageInfo):
|
||||
def update(self, pkg: Pkgbuild):
|
||||
self.version = pkg.version
|
||||
self.arches = list(pkg.arches)
|
||||
self.depends = dict(pkg.depends)
|
||||
self.makedepends = dict(pkg.makedepends)
|
||||
self.provides = dict(pkg.provides)
|
||||
self.depends = list(pkg.depends)
|
||||
self.provides = list(pkg.provides)
|
||||
self.replaces = list(pkg.replaces)
|
||||
self.local_depends = list(pkg.local_depends)
|
||||
self.repo = pkg.repo
|
||||
self.mode = pkg.mode
|
||||
self.nodeps = pkg.nodeps
|
||||
self.crossdirect = pkg.crossdirect
|
||||
self.path = pkg.path
|
||||
self.pkgver = pkg.pkgver
|
||||
self.pkgrel = pkg.pkgrel
|
||||
self.description = pkg.description
|
||||
self.sources_refreshed = self.sources_refreshed or pkg.sources_refreshed
|
||||
self.update_version()
|
||||
|
||||
def refresh_sources(self):
|
||||
raise NotImplementedError()
|
||||
|
||||
def get_filename(self, arch: Arch):
|
||||
if not self.version:
|
||||
self.update_version()
|
||||
if self.arches[0] == 'any':
|
||||
arch = 'any'
|
||||
return f'{self.name}-{self.version}-{arch}.pkg.tar.zst'
|
||||
|
||||
def is_built(self, arch: Arch, tolerate_archless: bool = True) -> bool:
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
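For context, a quick illustration of the filename scheme that get_filename() above encodes; the package name and version are invented for the example, not taken from the diff.

# Illustrative sketch only (hypothetical values):
# pkg.name = 'hello'; pkg.version = '1.0-1'; pkg.arches = ['aarch64']
# pkg.get_filename('aarch64')  ->  'hello-1.0-1-aarch64.pkg.tar.zst'
# With arches == ['any'], any requested arch collapses to the 'any' filename:
# pkg.get_filename('aarch64')  ->  'hello-1.0-1-any.pkg.tar.zst'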
class Pkgbase(Pkgbuild):
|
||||
subpackages: list[SubPkgbuild]
|
||||
_built_for: set[Arch]
|
||||
|
||||
def __init__(self, relative_path: str, subpackages: list[SubPkgbuild] = [], **args):
|
||||
self._built_for = set()
|
||||
self.subpackages = list(subpackages)
|
||||
super().__init__(relative_path, **args)
|
||||
|
||||
@@ -260,7 +140,6 @@ class Pkgbase(Pkgbuild):
|
||||
if not isinstance(pkg, Pkgbase):
|
||||
raise Exception(f"Tried to update pkgbase {self.name} with non-base pkg {pkg}")
|
||||
Pkgbuild.update(self, pkg)
|
||||
self._built_for.update(pkg._built_for)
|
||||
sub_dict = {p.name: p for p in self.subpackages}
|
||||
self.subpackages.clear()
|
||||
for new_pkg in pkg.subpackages:
|
||||
@@ -281,24 +160,11 @@ class Pkgbase(Pkgbuild):
|
||||
if lazy and self.sources_refreshed:
|
||||
return
|
||||
parsed = parse_pkgbuild(self.path, sources_refreshed=True)
|
||||
basepkg = parsed[0]
|
||||
assert isinstance(basepkg, (Pkgbase, SubPkgbuild))
|
||||
if isinstance(basepkg, SubPkgbuild):
|
||||
basepkg = basepkg.pkgbase
|
||||
basepkgs = [p for p in parsed if isinstance(p, Pkgbase)]
|
||||
if not len(basepkgs) == 1:
|
||||
raise Exception(f"error refreshing {self.name}: wrong number of base packages found: {basepkgs}")
|
||||
self.sources_refreshed = True
|
||||
self.update(basepkg)
|
||||
|
||||
def names(self) -> list[str]:
|
||||
names = set(Pkgbuild.names(self))
|
||||
for pkg in self.subpackages:
|
||||
names.update(pkg.names())
|
||||
return list(names)
|
||||
|
||||
def is_built(self, arch: Arch, tolerate_archless: bool = True) -> bool:
|
||||
arches = {arch}
|
||||
if tolerate_archless:
|
||||
arches.add('any')
|
||||
return bool(self._built_for.intersection(arches))
|
||||
self.update(basepkgs[0])
|
||||
|
||||
|
||||
class SubPkgbuild(Pkgbuild):
|
||||
@@ -308,66 +174,51 @@ class SubPkgbuild(Pkgbuild):
|
||||
|
||||
self.name = name
|
||||
self.pkgbase = pkgbase
|
||||
self.srcinfo_cache = pkgbase.srcinfo_cache
|
||||
|
||||
self.sources_refreshed = False
|
||||
self.update(pkgbase)
|
||||
|
||||
# set to None - will be replaced with base_pkg if still None after parsing
|
||||
self.depends = None # type: ignore[assignment]
|
||||
self.makedepends = None # type: ignore[assignment]
|
||||
self.provides = None # type: ignore[assignment]
|
||||
self.replaces = None # type: ignore[assignment]
|
||||
self.provides = []
|
||||
self.replaces = []
|
||||
|
||||
def refresh_sources(self, lazy: bool = True):
|
||||
assert self.pkgbase
|
||||
self.pkgbase.refresh_sources(lazy=lazy)
|
||||
|
||||
def is_built(self, arch: Arch, tolerate_archless: bool = True) -> bool:
|
||||
return self.pkgbase.is_built(arch)
|
||||
|
||||
|
||||
def parse_pkgbuild(
|
||||
relative_pkg_dir: str,
|
||||
_config: Optional[ConfigStateHolder] = None,
|
||||
force_refresh_srcinfo: bool = False,
|
||||
sources_refreshed: bool = False,
|
||||
) -> list[Pkgbuild]:
|
||||
def parse_pkgbuild(relative_pkg_dir: str, _config: Optional[ConfigStateHolder] = None, sources_refreshed: bool = False) -> list[Pkgbuild]:
|
||||
"""
|
||||
Since function may run in a different subprocess, we need to be passed the config via parameter
|
||||
"""
|
||||
global config
|
||||
if _config:
|
||||
config = _config
|
||||
setup_logging(verbose=config.runtime.verbose, force_colors=config.runtime.colors, log_setup=False) # different subprocess needs log setup.
|
||||
logging.info(f"Discovering PKGBUILD for {relative_pkg_dir}")
|
||||
|
||||
if force_refresh_srcinfo:
|
||||
logging.info('force-refreshing SRCINFOs')
|
||||
# parse SRCINFO cache metadata and get correct SRCINFO lines
|
||||
srcinfo_cache, lines = SrcinfoMetaFile.handle_directory(relative_pkg_dir, force_refresh=force_refresh_srcinfo, write=True)
|
||||
assert lines and srcinfo_cache
|
||||
assert 'build_mode' in srcinfo_cache
|
||||
mode = srcinfo_cache.build_mode
|
||||
assert 'build_nodeps' in srcinfo_cache
|
||||
nodeps = srcinfo_cache.build_nodeps
|
||||
setup_logging(verbose=config.runtime['verbose'], log_setup=False) # different thread needs log setup.
|
||||
logging.info(f"Parsing PKGBUILD for {relative_pkg_dir}")
|
||||
pkgbuilds_dir = config.get_path('pkgbuilds')
|
||||
pkgdir = os.path.join(pkgbuilds_dir, relative_pkg_dir)
|
||||
filename = os.path.join(pkgdir, 'PKGBUILD')
|
||||
logging.debug(f"Parsing {filename}")
|
||||
mode = None
|
||||
with open(filename, 'r') as file:
|
||||
for line in file.read().split('\n'):
|
||||
if line.startswith('_mode='):
|
||||
mode = line.split('=')[1]
|
||||
break
|
||||
if mode not in ['host', 'cross']:
|
||||
err = 'an invalid' if mode is not None else 'no'
|
||||
err_end = f": {repr(mode)}" if mode is not None else "."
|
||||
msg = f'{relative_pkg_dir}/PKGBUILD has {err} mode configured{err_end}'
|
||||
if mode is None:
|
||||
logging.warning(msg)
|
||||
else:
|
||||
raise Exception(msg)
|
||||
raise Exception((f'{relative_pkg_dir}/PKGBUILD has {"no" if mode is None else "an invalid"} mode configured') +
|
||||
(f': "{mode}"' if mode is not None else ''))
|
||||
|
||||
# if _crossdirect is unset (None), it defaults to True
|
||||
crossdirect_enabled = srcinfo_cache.build_crossdirect in (None, True)
|
||||
|
||||
base_package = Pkgbase(relative_pkg_dir, sources_refreshed=sources_refreshed, srcinfo_cache=srcinfo_cache)
|
||||
base_package.crossdirect = crossdirect_enabled
|
||||
base_package = Pkgbase(relative_pkg_dir, sources_refreshed=sources_refreshed)
|
||||
base_package.mode = mode
|
||||
base_package.nodeps = nodeps
|
||||
base_package.repo = relative_pkg_dir.split('/')[0]
|
||||
srcinfo = run_cmd(
|
||||
MAKEPKG_CMD + ['--printsrcinfo'],
|
||||
cwd=pkgdir,
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
assert (isinstance(srcinfo, subprocess.CompletedProcess))
|
||||
lines = srcinfo.stdout.decode('utf-8').split('\n')
|
||||
|
||||
current: Pkgbuild = base_package
|
||||
multi_pkgs = False
|
||||
@@ -378,57 +229,40 @@ def parse_pkgbuild(
|
||||
splits = line.split(' = ')
|
||||
if line.startswith('pkgbase'):
|
||||
base_package.name = splits[1]
|
||||
multi_pkgs = True
|
||||
elif line.startswith('pkgname'):
|
||||
current = SubPkgbuild(splits[1], base_package)
|
||||
assert isinstance(base_package.subpackages, list)
|
||||
base_package.subpackages.append(current)
|
||||
if current.name != base_package.name:
|
||||
multi_pkgs = True
|
||||
if multi_pkgs:
|
||||
current = SubPkgbuild(splits[1], base_package)
|
||||
assert isinstance(base_package.subpackages, list)
|
||||
base_package.subpackages.append(current)
|
||||
else:
|
||||
current.name = splits[1]
|
||||
elif line.startswith('pkgver'):
|
||||
current.pkgver = splits[1]
|
||||
elif line.startswith('pkgrel'):
|
||||
current.pkgrel = splits[1]
|
||||
elif line.startswith('pkgdesc'):
|
||||
current.description = splits[1]
|
||||
elif line.startswith('arch'):
|
||||
current.arches.append(splits[1])
|
||||
elif line.startswith('provides'):
|
||||
if not current.provides:
|
||||
current.provides = {}
|
||||
current.provides = get_version_specs(splits[1], current.provides)
|
||||
current.provides.append(splits[1])
|
||||
elif line.startswith('replaces'):
|
||||
if not current.replaces:
|
||||
current.replaces = []
|
||||
current.replaces.append(splits[1])
|
||||
elif splits[0] in ['depends', 'makedepends', 'checkdepends', 'optdepends']:
|
||||
spec = splits[1].split(': ', 1)[0]
|
||||
if not current.depends:
|
||||
current.depends = (base_package.makedepends or {}).copy()
|
||||
current.depends = get_version_specs(spec, current.depends)
|
||||
if splits[0] == 'makedepends':
|
||||
if not current.makedepends:
|
||||
current.makedepends = {}
|
||||
current.makedepends = get_version_specs(spec, current.makedepends)
|
||||
elif line.startswith('depends') or line.startswith('makedepends') or line.startswith('checkdepends') or line.startswith('optdepends'):
|
||||
current.depends.append(splits[1].split('=')[0].split(': ')[0])
|
||||
|
||||
results: list[Pkgbuild] = list(base_package.subpackages)
|
||||
if multi_pkgs:
|
||||
if len(results) > 1:
|
||||
logging.debug(f" Split package detected: {base_package.name}: {results}")
|
||||
base_package.update_version()
|
||||
else:
|
||||
results = [base_package]
|
||||
|
||||
base_package.update_version()
|
||||
for pkg in results:
|
||||
assert isinstance(pkg, Pkgbuild)
|
||||
pkg.depends = list(set(pkg.depends)) # deduplicate dependencies
|
||||
pkg.update_version()
|
||||
if not (pkg.version == base_package.version):
|
||||
raise Exception(f'Subpackage malformed! Versions differ! base: {base_package}, subpackage: {pkg}')
|
||||
if isinstance(pkg, SubPkgbuild):
|
||||
if pkg.depends is None:
|
||||
pkg.depends = base_package.depends
|
||||
if pkg.makedepends is None:
|
||||
pkg.makedepends = base_package.makedepends
|
||||
if pkg.replaces is None:
|
||||
pkg.replaces = base_package.replaces
|
||||
if pkg.provides is None:
|
||||
pkg.provides = base_package.provides
|
||||
return results
|
||||
|
||||
|
||||
@@ -437,161 +271,79 @@ _pkgbuilds_paths = dict[str, list[Pkgbuild]]()
|
||||
_pkgbuilds_scanned: bool = False
|
||||
|
||||
|
||||
def get_pkgbuild_by_path(
|
||||
relative_path: str,
|
||||
force_refresh_srcinfo: bool = False,
|
||||
lazy: bool = True,
|
||||
_config: Optional[ConfigStateHolder] = None,
|
||||
) -> list[Pkgbuild]:
|
||||
def get_pkgbuild_by_path(relative_path: str, lazy: bool = True, _config: Optional[ConfigStateHolder] = None) -> list[Pkgbuild]:
|
||||
global _pkgbuilds_cache, _pkgbuilds_paths
|
||||
if lazy and not force_refresh_srcinfo and relative_path in _pkgbuilds_paths:
|
||||
if lazy and relative_path in _pkgbuilds_paths:
|
||||
return _pkgbuilds_paths[relative_path]
|
||||
parsed = parse_pkgbuild(relative_path, force_refresh_srcinfo=force_refresh_srcinfo, _config=_config)
|
||||
parsed = parse_pkgbuild(relative_path, _config=_config)
|
||||
_pkgbuilds_paths[relative_path] = parsed
|
||||
for pkg in parsed:
|
||||
_pkgbuilds_cache[pkg.name] = pkg
|
||||
return parsed
|
||||
|
||||
|
||||
def get_pkgbuild_by_name(name: str, lazy: bool = True):
|
||||
if lazy and name in _pkgbuilds_cache:
|
||||
return _pkgbuilds_cache[name]
|
||||
if _pkgbuilds_scanned and lazy:
|
||||
raise Exception(f"couldn't find PKGBUILD for package with name {name}")
|
||||
discover_pkgbuilds(lazy=lazy)
|
||||
assert _pkgbuilds_scanned
|
||||
return get_pkgbuild_by_name(name=name, lazy=lazy)
|
||||
|
||||
|
||||
def get_pkgbuild_dirs(quiet: bool = True, repositories: Optional[list[str]] = None) -> list[str]:
|
||||
"""Gets the relative paths to directories containing PKGBUILDs, optionally warns about dirs without a PKGBUILD"""
|
||||
pkgbuilds_dir = config.get_path('pkgbuilds')
|
||||
paths = []
|
||||
for repo in repositories or get_kupfer_repo_names(local=True):
|
||||
path = os.path.join(pkgbuilds_dir, repo)
|
||||
if not os.path.exists(path):
|
||||
if not quiet:
|
||||
logging.warning(f'repo "{repo}" can\'t be listed: "{path}" doesn\'t exist; skipping')
|
||||
continue
|
||||
for dir in os.listdir(path):
|
||||
p = os.path.join(repo, dir)
|
||||
if not os.path.exists(os.path.join(pkgbuilds_dir, p, 'PKGBUILD')):
|
||||
if not quiet:
|
||||
logging.warning(f"{p} doesn't include a PKGBUILD file; skipping")
|
||||
continue
|
||||
paths.append(p)
|
||||
return paths
|
||||
|
||||
|
||||
def discover_pkgbuilds(parallel: bool = True, lazy: bool = True, repositories: Optional[list[str]] = None) -> dict[str, Pkgbuild]:
|
||||
def discover_pkgbuilds(parallel: bool = True, lazy: bool = True) -> dict[str, Pkgbuild]:
|
||||
global _pkgbuilds_cache, _pkgbuilds_scanned
|
||||
if lazy and _pkgbuilds_scanned:
|
||||
logging.debug("Reusing cached pkgbuilds repo")
|
||||
return _pkgbuilds_cache.copy()
|
||||
check_programs_wrap(['makepkg'])
|
||||
pkgbuilds_dir = config.get_path('pkgbuilds')
|
||||
packages: dict[str, Pkgbuild] = {}
|
||||
paths = []
|
||||
init_pkgbuilds(interactive=False)
|
||||
paths = get_pkgbuild_dirs(quiet=False, repositories=repositories)
|
||||
logging.info(f"Discovering PKGBUILDs{f' in repositories: {repositories}' if repositories else ''}")
|
||||
for repo in REPOSITORIES:
|
||||
for dir in os.listdir(os.path.join(pkgbuilds_dir, repo)):
|
||||
paths.append(os.path.join(repo, dir))
|
||||
|
||||
logging.info("Parsing PKGBUILDs")
|
||||
|
||||
results = []
|
||||
if parallel:
|
||||
paths_filtered = paths
|
||||
backend = 'threading'
|
||||
pass_config = config if backend != 'threading' else None
|
||||
chunks = (Parallel(n_jobs=multiprocessing.cpu_count() * 4,
|
||||
backend=backend)(delayed(get_pkgbuild_by_path)(path, lazy=lazy, _config=pass_config) for path in paths_filtered))
|
||||
if lazy:
|
||||
# filter out cached packages as the caches don't cross process boundaries
|
||||
paths_filtered = []
|
||||
for p in paths:
|
||||
if p in _pkgbuilds_paths:
|
||||
# use cache
|
||||
results += _pkgbuilds_paths[p]
|
||||
else:
|
||||
paths_filtered += [p]
|
||||
chunks = (Parallel(n_jobs=multiprocessing.cpu_count() * 4)(
|
||||
delayed(get_pkgbuild_by_path)(path, lazy=lazy, _config=config) for path in paths_filtered))
|
||||
else:
|
||||
chunks = (get_pkgbuild_by_path(path, lazy=lazy) for path in paths)
|
||||
|
||||
if repositories is None:
|
||||
_pkgbuilds_paths.clear()
|
||||
_pkgbuilds_paths.clear()
|
||||
# one list of packages per path
|
||||
for pkglist in chunks:
|
||||
_pkgbuilds_paths[pkglist[0].path] = pkglist
|
||||
results += pkglist
|
||||
|
||||
logging.info('Building package dictionary')
|
||||
logging.debug('Building package dictionary!')
|
||||
for package in results:
|
||||
for name in [package.name] + package.replaces:
|
||||
if name in packages:
|
||||
logging.warning(f'Overriding {packages[package.name]} with {package}')
|
||||
packages[name] = package
|
||||
|
||||
if repositories is None:
|
||||
# partial scans (specific repos) don't count as truly scanned
|
||||
_pkgbuilds_cache.clear()
|
||||
_pkgbuilds_scanned = True
|
||||
_pkgbuilds_cache.update(packages)
|
||||
|
||||
# This filters local_depends to only include the ones that are provided by local PKGBUILDs
|
||||
# we need to iterate over the entire cache in case partial scans happened
|
||||
for package in _pkgbuilds_cache.values():
|
||||
package.local_depends = list(package.depends.keys())
|
||||
# This filters the deps to only include the ones that are provided in this repo
|
||||
for package in packages.values():
|
||||
package.local_depends = package.depends.copy()
|
||||
for dep in package.depends.copy():
|
||||
found = dep in _pkgbuilds_cache
|
||||
for pkg in _pkgbuilds_cache.values():
|
||||
found = dep in packages
|
||||
for pkg in packages.values():
|
||||
if found:
|
||||
break
|
||||
if dep in pkg.names():
|
||||
logging.debug(f'{package.path}: Found {pkg.name} that provides {dep}')
|
||||
logging.debug(f'Found {pkg.name} that provides {dep}')
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
logging.debug(f'{package.path}: Removing {dep} from local dependencies')
|
||||
logging.debug(f'Removing {dep} from dependencies')
|
||||
package.local_depends.remove(dep)
|
||||
|
||||
_pkgbuilds_cache.clear()
|
||||
_pkgbuilds_cache.update(packages)
|
||||
_pkgbuilds_scanned = True
|
||||
return packages
|
||||
|
||||
|
||||
def filter_pkgbuilds(
|
||||
paths: Iterable[str],
|
||||
repo: Optional[dict[str, Pkgbuild]] = None,
|
||||
arch: Optional[Arch] = None,
|
||||
allow_empty_results=True,
|
||||
use_paths=True,
|
||||
use_names=True,
|
||||
) -> Iterable[Pkgbuild]:
|
||||
if not (use_names or use_paths):
|
||||
raise Exception('Error: filter_packages instructed to match neither by names nor paths; impossible!')
|
||||
paths = list(paths)
|
||||
plural = 's' if len(paths) > 1 else ''
|
||||
fields = []
|
||||
if use_names:
|
||||
fields.append('name' + plural)
|
||||
if use_paths:
|
||||
fields.append('path' + plural)
|
||||
fields_err = ' or '.join(fields)
|
||||
if not allow_empty_results and not paths:
|
||||
raise Exception(f"Can't search for packages: no {fields_err} given")
|
||||
repo = repo or discover_pkgbuilds()
|
||||
if 'all' in paths:
|
||||
all_pkgs = list(repo.values())
|
||||
if arch:
|
||||
all_pkgs = [pkg for pkg in all_pkgs if set([arch, 'any']).intersection(pkg.arches)]
|
||||
return all_pkgs
|
||||
result = []
|
||||
to_find = list(paths)
|
||||
for pkg in repo.values():
|
||||
comparison = set()
|
||||
if use_paths:
|
||||
comparison.add(pkg.path)
|
||||
if use_names:
|
||||
comparison.add(pkg.name)
|
||||
matches = list(comparison.intersection(paths))
|
||||
if matches:
|
||||
assert pkg.arches
|
||||
if arch and not set([arch, 'any']).intersection(pkg.arches):
|
||||
logging.warn(f"Pkg {pkg.name} matches query {matches[0]} but isn't available for architecture {arch}: {pkg.arches}")
|
||||
continue
|
||||
result += [pkg]
|
||||
for m in set(matches).intersection(to_find):
|
||||
to_find.remove(m)
|
||||
|
||||
if not allow_empty_results:
|
||||
if not result:
|
||||
raise Exception(f'No packages matched by {fields_err}: ' + ', '.join([f'"{p}"' for p in paths]))
|
||||
if to_find:
|
||||
raise Exception(f"No packagages matched by {fields_err}: " + ', '.join([f'"{p}"' for p in to_find]))
|
||||
|
||||
return result
|
||||
|
||||
@@ -1,244 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from typing import Any, ClassVar, Optional
|
||||
|
||||
from config.state import config
|
||||
from constants import MAKEPKG_CMD, SRCINFO_FILE, SRCINFO_METADATA_FILE, SRCINFO_INITIALISED_FILE
|
||||
from dictscheme import DictScheme
|
||||
from exec.cmd import run_cmd
|
||||
from utils import sha256sum
|
||||
|
||||
SRCINFO_CHECKSUM_FILES = ['PKGBUILD', SRCINFO_FILE]
|
||||
|
||||
|
||||
class JsonFile(DictScheme):
|
||||
|
||||
_filename: ClassVar[str]
|
||||
_relative_path: str
|
||||
_strip_hidden: ClassVar[bool] = True
|
||||
_sparse: ClassVar[bool] = False
|
||||
|
||||
def toJSON(self) -> str:
|
||||
'Returns a json representation, with private keys that start with "_" filtered out'
|
||||
return json.dumps(self.toDict(), indent=2)
|
||||
|
||||
def write(self):
|
||||
'Write the filtered json representation to disk'
|
||||
filepath = os.path.join(config.get_path('pkgbuilds'), self._relative_path, self._filename)
|
||||
logging.debug(f'{self._relative_path}: writing {self._filename}')
|
||||
with open(filepath, 'w') as fd:
|
||||
fd.write(self.toJSON())
|
||||
|
||||
@classmethod
|
||||
def _read_file(cls, relative_path) -> Optional[dict]:
|
||||
pkgdir = os.path.join(config.get_path('pkgbuilds'), relative_path)
|
||||
filepath = os.path.join(pkgdir, cls._filename)
|
||||
if not os.path.exists(filepath):
|
||||
raise Exception(f"{relative_path}: {cls._filename} doesn't exist")
|
||||
with open(filepath, 'r') as fd:
|
||||
contents = json.load(fd)
|
||||
return contents
|
||||
|
||||
def read(self) -> Optional[dict[str, Any]]:
|
||||
"""
|
||||
Try reading and parsing the JSON file. Due to the way this class works, it should be a dict (or empty).
|
||||
No error handling is provided, bring your own try/catch!
|
||||
"""
|
||||
return type(self)._read_file(self._relative_path)
|
||||
|
||||
|
||||
class SrcInitialisedFile(JsonFile):
|
||||
|
||||
PKGBUILD: str
|
||||
_filename: ClassVar[str] = SRCINFO_INITIALISED_FILE
|
||||
|
||||
def __init__(self, relative_path: str, raise_exception: bool = False):
|
||||
self._relative_path = relative_path
|
||||
try:
|
||||
content = self.read()
|
||||
assert isinstance(content, dict)
|
||||
self.update(content)
|
||||
except Exception as ex:
|
||||
if raise_exception:
|
||||
raise ex
|
||||
|
||||
|
||||
srcinfo_meta_defaults = {
|
||||
'build_mode': None,
|
||||
"build_nodeps": None,
|
||||
"build_crossdirect": None,
|
||||
}
|
||||
|
||||
|
||||
class SrcinfoMetaFile(JsonFile):
|
||||
|
||||
checksums: dict[str, str]
|
||||
build_mode: Optional[str]
|
||||
build_nodeps: Optional[bool]
|
||||
build_crossdirect: Optional[bool]
|
||||
|
||||
_changed: bool
|
||||
_filename: ClassVar[str] = SRCINFO_METADATA_FILE
|
||||
|
||||
@staticmethod
|
||||
def parse_existing(relative_pkg_dir: str) -> SrcinfoMetaFile:
|
||||
'tries to parse the srcinfo_meta.json file in the specified pkgbuild dir'
|
||||
metadata_raw = SrcinfoMetaFile._read_file(relative_pkg_dir)
|
||||
return SrcinfoMetaFile.fromDict(metadata_raw | {
|
||||
'_relative_path': relative_pkg_dir,
|
||||
'_changed': False,
|
||||
})
|
||||
|
||||
@staticmethod
|
||||
def generate_new(relative_pkg_dir: str, write: bool = True) -> tuple[SrcinfoMetaFile, list[str]]:
|
||||
'Creates a new SrcinfoMetaFile object with checksums, creating a SRCINFO as necessary'
|
||||
s = SrcinfoMetaFile({
|
||||
'_relative_path': relative_pkg_dir,
|
||||
'_changed': True,
|
||||
'checksums': {},
|
||||
**srcinfo_meta_defaults,
|
||||
})
|
||||
return s, s.refresh_all()
|
||||
|
||||
@staticmethod
|
||||
def handle_directory(relative_pkg_dir: str, force_refresh: bool = False, write: bool = True) -> tuple[SrcinfoMetaFile, list[str]]:
|
||||
lines = None
|
||||
# try reading existing cache metadata
|
||||
try:
|
||||
metadata = SrcinfoMetaFile.parse_existing(relative_pkg_dir)
|
||||
except Exception as ex:
|
||||
logging.debug(f"{relative_pkg_dir}: something went wrong parsing json from {SrcinfoMetaFile._filename},"
|
||||
f"running `makepkg --printsrcinfo` instead instead: {ex}")
|
||||
return SrcinfoMetaFile.generate_new(relative_pkg_dir, write=write)
|
||||
# if for whatever reason only the SRCINFO got deleted but PKGBUILD has not been modified,
|
||||
# we do want the checksum verification to work. So regenerate SRCINFO first.
|
||||
if not os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_pkg_dir, SRCINFO_FILE)):
|
||||
lines = metadata.refresh_srcinfo()
|
||||
if not metadata.validate_checksums():
|
||||
# metadata is invalid
|
||||
return SrcinfoMetaFile.generate_new(relative_pkg_dir, write=write)
|
||||
# metadata is valid
|
||||
assert metadata
|
||||
if not force_refresh:
|
||||
logging.debug(f'{metadata._relative_path}: srcinfo checksums match!')
|
||||
lines = lines or metadata.read_srcinfo_file()
|
||||
for build_field in srcinfo_meta_defaults.keys():
|
||||
if build_field not in metadata:
|
||||
metadata.refresh_build_fields()
|
||||
if write:
|
||||
metadata.write()
|
||||
break
|
||||
else:
|
||||
lines = metadata.refresh_all(write=write)
|
||||
return metadata, lines
|
||||
|
||||
def refresh_checksums(self):
|
||||
pkgdir = os.path.join(config.get_path('pkgbuilds'), self._relative_path)
|
||||
if 'checksums' not in self:
|
||||
self['checksums'] = None
|
||||
checksums_old = self.checksums.copy()
|
||||
checksums = {p: sha256sum(os.path.join(pkgdir, p)) for p in SRCINFO_CHECKSUM_FILES}
|
||||
if self.checksums is None:
|
||||
self.checksums = checksums
|
||||
else:
|
||||
self.checksums.clear()
|
||||
self.checksums.update(checksums)
|
||||
if checksums != checksums_old:
|
||||
self._changed = True
|
||||
|
||||
def refresh_build_fields(self):
|
||||
self.update(srcinfo_meta_defaults)
|
||||
with open(os.path.join(config.get_path('pkgbuilds'), self._relative_path, 'PKGBUILD'), 'r') as file:
|
||||
lines = file.read().split('\n')
|
||||
for line in lines:
|
||||
if not line.startswith('_') or '=' not in line:
|
||||
continue
|
||||
key, val = line.split('=', 1)
|
||||
val = val.strip("\"'")
|
||||
if key == '_mode':
|
||||
self.build_mode = val
|
||||
elif key == '_nodeps':
|
||||
self.build_nodeps = val.lower() == 'true'
|
||||
elif key == '_crossdirect':
|
||||
self.build_crossdirect = val.lower() == 'true'
|
||||
else:
|
||||
continue
|
||||
|
||||
def refresh_srcinfo(self) -> list[str]:
|
||||
'Run `makepkg --printsrcinfo` to create an updated SRCINFO file and return the lines from it'
|
||||
logging.info(f"{self._relative_path}: Generating SRCINFO with makepkg")
|
||||
pkgdir = os.path.join(config.get_path('pkgbuilds'), self._relative_path)
|
||||
srcinfo_file = os.path.join(pkgdir, SRCINFO_FILE)
|
||||
sproc = run_cmd(
|
||||
MAKEPKG_CMD + ['--printsrcinfo'],
|
||||
cwd=pkgdir,
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
assert (isinstance(sproc, subprocess.CompletedProcess))
|
||||
if sproc.returncode:
|
||||
raise Exception(f"{self._relative_path}: makepkg failed to parse the PKGBUILD! Error code: {sproc.returncode}")
|
||||
output = sproc.stdout.decode('utf-8')
|
||||
with open(srcinfo_file, 'w') as srcinfo_fd:
|
||||
srcinfo_fd.write(output)
|
||||
return output.split('\n')
|
||||
|
||||
def read_srcinfo_file(self) -> list[str]:
|
||||
with open(os.path.join(config.get_path('pkgbuilds'), self._relative_path, SRCINFO_FILE), 'r') as srcinfo_fd:
|
||||
lines = srcinfo_fd.read().split('\n')
|
||||
return lines
|
||||
|
||||
def refresh_all(self, write: bool = True) -> list[str]:
|
||||
lines = self.refresh_srcinfo()
|
||||
self.refresh_checksums()
|
||||
self.refresh_build_fields()
|
||||
if write:
|
||||
self.write()
|
||||
return lines
|
||||
|
||||
def validate_checksums(self) -> bool:
|
||||
"Returns True if all checksummed files exist and checksums match"
|
||||
pkgdir = os.path.join(config.get_path('pkgbuilds'), self._relative_path)
|
||||
assert self.checksums
|
||||
for filename in SRCINFO_CHECKSUM_FILES:
|
||||
if filename not in self.checksums:
|
||||
logging.debug(f"{self._relative_path}: No checksum for {filename} available")
|
||||
return False
|
||||
checksum = self.checksums[filename]
|
||||
path = os.path.join(pkgdir, filename)
|
||||
if not os.path.exists(path):
|
||||
logging.debug(f"{self._relative_path}: can't checksum'{filename}: file doesn't exist")
|
||||
return False
|
||||
file_sum = sha256sum(path)
|
||||
if file_sum != checksum:
|
||||
logging.debug(f'{self._relative_path}: Checksum for file "{filename}" doesn\'t match')
|
||||
return False
|
||||
return True
|
||||
|
||||
def is_src_initialised(self) -> bool:
|
||||
checksum = self.checksums["PKGBUILD"]
|
||||
assert checksum
|
||||
try:
|
||||
initfile = SrcInitialisedFile(self._relative_path, raise_exception=True)
|
||||
if "PKGBUILD" not in initfile:
|
||||
raise Exception("'PKGBUILD' not in parser output")
|
||||
initialised_checksum = initfile.PKGBUILD
|
||||
except Exception as ex:
|
||||
logging.debug(f"{self._relative_path}: Couldn't read or parse {SRCINFO_INITIALISED_FILE}: {ex}")
|
||||
initialised_checksum = None
|
||||
result = checksum == initialised_checksum
|
||||
if initialised_checksum and not result:
|
||||
logging.debug("Sources were set up for a different version. "
|
||||
f"Current PKGBUILD checksum: {checksum}; "
|
||||
f"Initialised for: {initialised_checksum}")
|
||||
return result
|
||||
|
||||
def write_src_initialised(self):
|
||||
initfile = SrcInitialisedFile(self._relative_path)
|
||||
self.refresh_checksums()
|
||||
initfile.PKGBUILD = self.checksums["PKGBUILD"]
|
||||
initfile.write()
|
||||
@@ -4,8 +4,8 @@ import os
|
||||
|
||||
from copy import copy
|
||||
|
||||
from config.state import ConfigStateHolder, config
|
||||
from packages.pkgbuild import init_pkgbuilds, discover_pkgbuilds, Pkgbuild, parse_pkgbuild
|
||||
from config import ConfigStateHolder, config
|
||||
from .pkgbuild import init_pkgbuilds, discover_pkgbuilds, Pkgbuild, parse_pkgbuild
|
||||
from .device import Device, DEVICE_DEPRECATIONS, get_device, get_devices, parse_device_pkg, check_devicepkg_name
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ ONEPLUS_ENCHILADA_PKG = f'device-{ONEPLUS_ENCHILADA}'
|
||||
def enchilada_pkgbuild(initialise_pkgbuilds_dir: ConfigStateHolder):
|
||||
config = initialise_pkgbuilds_dir
|
||||
config.try_load_file()
|
||||
return parse_pkgbuild(os.path.join('device', ONEPLUS_ENCHILADA_PKG))[0]
|
||||
return parse_pkgbuild(os.path.join('device', ONEPLUS_ENCHILADA_PKG), _config=config)[0]
|
||||
|
||||
|
||||
def validate_oneplus_enchilada(d: Device):
|
||||
@@ -1,52 +0,0 @@
|
||||
import click
|
||||
import sys
|
||||
|
||||
from enlighten import Counter, Manager, get_manager as _getmanager
|
||||
from typing import Hashable, Optional
|
||||
|
||||
from config.state import config
|
||||
|
||||
BAR_PADDING = 25
|
||||
DEFAULT_OUTPUT = sys.stderr
|
||||
|
||||
managers: dict[Hashable, Manager] = {}
|
||||
|
||||
progress_bars_option = click.option(
|
||||
'--force-progress-bars/--no-progress-bars',
|
||||
is_flag=True,
|
||||
default=None,
|
||||
help='Force enable/disable progress bars. Defaults to autodetection.',
|
||||
)
|
||||
|
||||
|
||||
def get_manager(file=DEFAULT_OUTPUT, enabled: Optional[bool] = None) -> Manager:
|
||||
global managers
|
||||
m = managers.get(file, None)
|
||||
if not m:
|
||||
kwargs = {}
|
||||
if enabled is None or config.runtime.progress_bars is False:
|
||||
enabled = config.runtime.progress_bars
|
||||
if enabled is not None:
|
||||
kwargs = {"enabled": enabled}
|
||||
m = _getmanager(file, **kwargs)
|
||||
managers[file] = m
|
||||
return m
|
||||
|
||||
|
||||
def get_progress_bar(*kargs, file=DEFAULT_OUTPUT, leave=False, **kwargs) -> Counter:
|
||||
m = get_manager(file=file)
|
||||
|
||||
kwargs["file"] = file
|
||||
kwargs["leave"] = leave
|
||||
return m.counter(*kargs, **kwargs)
|
||||
|
||||
|
||||
def get_levels_bar(*kargs, file=DEFAULT_OUTPUT, enable_rate=True, **kwargs):
|
||||
kwargs["fields"] = {"name": "None", "level": 1, "levels_total": 1} | (kwargs.get("fields", None) or {})
|
||||
f = (u'{desc}: {name}{desc_pad}{percentage:3.0f}%|{bar}| '
|
||||
u'{count:{len_total}d}/{total:d} '
|
||||
u'[lvl: {level}/{levels_total}] ')
|
||||
if enable_rate:
|
||||
f += u'[{elapsed}<{eta}, {rate:.2f}{unit_pad}{unit}/s]'
|
||||
kwargs["bar_format"] = f
|
||||
return get_progress_bar(*kargs, **kwargs)
|
||||
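A minimal usage sketch for the progress-bar helper above; the iterable, labels and the build() call are assumptions for illustration, not code from the repository.

# Hypothetical example: count processed packages with the returned enlighten Counter.
# bar = get_progress_bar(total=len(packages), desc='Building', unit='pkg')
# for package in packages:
#     build(package)  # placeholder for the real work
#     bar.update()
# bar.close()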
@@ -1,4 +1,4 @@
#!/bin/bash

sudo -v
python -m pytest -v --cov=. --cov-branch --cov-report=term "$@" ./*/test_*.py
python -m pytest --junit-xml=pytest-report.xml -v "$@" ./*/test_*.py

@@ -1,12 +1,7 @@
click>=8.0.1
appdirs>=1.4.4
joblib>=1.0.1
click==8.0.1
appdirs==1.4.4
joblib==1.0.1
toml
typing_extensions
coloredlogs
munch
setuptools # required by munch
requests
python-dateutil
enlighten
PyYAML

@@ -4,11 +4,9 @@ import os
|
||||
import pathlib
|
||||
import click
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import SSH_COMMON_OPTIONS, SSH_DEFAULT_HOST, SSH_DEFAULT_PORT
|
||||
from chroot.abstract import Chroot
|
||||
from exec.cmd import run_cmd
|
||||
from exec.file import write_file
|
||||
from wrapper import check_programs_wrap
|
||||
|
||||
|
||||
@@ -34,7 +32,7 @@ def run_ssh_command(cmd: list[str] = [],
|
||||
extra_args = []
|
||||
if len(keys) > 0:
|
||||
extra_args += ['-i', keys[0]]
|
||||
if config.runtime.verbose:
|
||||
if config.runtime['verbose']:
|
||||
extra_args += ['-v']
|
||||
if alloc_tty:
|
||||
extra_args += ['-t']
|
||||
@@ -53,7 +51,7 @@ def run_ssh_command(cmd: list[str] = [],
|
||||
return run_cmd(full_cmd)
|
||||
|
||||
|
||||
def scp_put_files(src: list[str], dst: str, user: Optional[str] = None, host: str = SSH_DEFAULT_HOST, port: int = SSH_DEFAULT_PORT):
|
||||
def scp_put_files(src: list[str], dst: str, user: str = None, host: str = SSH_DEFAULT_HOST, port: int = SSH_DEFAULT_PORT):
|
||||
check_programs_wrap(['scp'])
|
||||
if not user:
|
||||
user = config.get_profile()['username']
|
||||
@@ -85,16 +83,21 @@ def find_ssh_keys():
|
||||
return keys
|
||||
|
||||
|
||||
def copy_ssh_keys(chroot: Chroot, user: str, allow_fail: bool = False):
|
||||
def copy_ssh_keys(root_dir: str, user: str):
|
||||
check_programs_wrap(['ssh-keygen'])
|
||||
ssh_dir_relative = os.path.join('/home', user, '.ssh')
|
||||
ssh_dir = chroot.get_path(ssh_dir_relative)
|
||||
authorized_keys_file_rel = os.path.join(ssh_dir_relative, 'authorized_keys')
|
||||
authorized_keys_file = chroot.get_path(authorized_keys_file_rel)
|
||||
authorized_keys_file = os.path.join(
|
||||
root_dir,
|
||||
'home',
|
||||
user,
|
||||
'.ssh',
|
||||
'authorized_keys',
|
||||
)
|
||||
if os.path.exists(authorized_keys_file):
|
||||
os.unlink(authorized_keys_file)
|
||||
|
||||
keys = find_ssh_keys()
|
||||
if len(keys) == 0:
|
||||
logging.warning("Could not find any ssh key to copy")
|
||||
logging.info("Could not find any ssh key to copy")
|
||||
create = click.confirm("Do you want me to generate an ssh key for you?", True)
|
||||
if not create:
|
||||
return
|
||||
@@ -113,34 +116,15 @@ def copy_ssh_keys(chroot: Chroot, user: str, allow_fail: bool = False):
|
||||
logging.fatal("Failed to generate ssh key")
|
||||
keys = find_ssh_keys()
|
||||
|
||||
if not keys:
|
||||
logging.warning("No SSH keys to be copied. Skipping.")
|
||||
return
|
||||
|
||||
auth_key_lines = []
|
||||
for key in keys:
|
||||
pub = f'{key}.pub'
|
||||
if not os.path.exists(pub):
|
||||
logging.debug(f'Skipping key {key}: {pub} not found')
|
||||
continue
|
||||
try:
|
||||
with open(pub, 'r') as file:
|
||||
contents = file.read()
|
||||
if not contents.strip():
|
||||
continue
|
||||
auth_key_lines.append(contents)
|
||||
except Exception as ex:
|
||||
logging.warning(f"Could not read ssh pub key {pub}", exc_info=ex)
|
||||
continue
|
||||
|
||||
ssh_dir = os.path.join(root_dir, 'home', user, '.ssh')
|
||||
if not os.path.exists(ssh_dir):
|
||||
logging.info(f"Creating {ssh_dir_relative!r} dir in chroot {chroot.path!r}")
|
||||
chroot.run_cmd(["mkdir", "-p", "-m", "700", ssh_dir_relative], switch_user=user)
|
||||
logging.info(f"Writing SSH pub keys to {authorized_keys_file}")
|
||||
try:
|
||||
write_file(authorized_keys_file, "\n".join(auth_key_lines), user=str(chroot.get_uid(user)), mode="644")
|
||||
except Exception as ex:
|
||||
logging.error(f"Failed to write SSH authorized_keys_file at {authorized_keys_file!r}:", exc_info=ex)
|
||||
if allow_fail:
|
||||
return
|
||||
raise ex from ex
|
||||
os.makedirs(ssh_dir, exist_ok=True, mode=0o700)
|
||||
|
||||
with open(authorized_keys_file, 'a') as authorized_keys:
|
||||
for key in keys:
|
||||
pub = f'{key}.pub'
|
||||
if not os.path.exists(pub):
|
||||
logging.debug(f'Skipping key {key}: {pub} not found')
|
||||
continue
|
||||
with open(pub, 'r') as file:
|
||||
authorized_keys.write(file.read())
|
||||
@@ -1,5 +0,0 @@
autoflake
mypy
yapf
pytest
pytest-cov
@@ -1,2 +1,2 @@
#!/bin/bash
git ls-files \*.py | sort -u | xargs mypy --pretty --show-error-codes --check-untyped-defs --install-types --ignore-missing-imports "$@"
git ls-files \*.py | sort -u | xargs mypy --pretty --show-error-codes --install-types --ignore-missing-imports "$@"

@@ -1,18 +0,0 @@
from typing import Union

try:
from typing import TypeAlias # type: ignore[attr-defined]
except ImportError:
from typing_extensions import TypeAlias

TypeAlias = TypeAlias

try:
from types import UnionType
except ImportError:
UnionType: TypeAlias = Union # type: ignore[no-redef]

try:
from types import NoneType
except ImportError:
NoneType: TypeAlias = type(None) # type: ignore[no-redef]
147 utils.py
@@ -1,18 +1,10 @@
import atexit
import click
import datetime
import grp
import hashlib
import logging
import os
import pwd
import requests
import subprocess
import tarfile

from dateutil.parser import parse as parsedate
from shutil import which
from typing import Any, Generator, IO, Optional, Union, Sequence
from typing import Optional, Union, Sequence

from exec.cmd import run_cmd, run_root_cmd

@@ -32,7 +24,7 @@ def programs_available(programs: Union[str, Sequence[str]], lazy: bool = True) -
|
||||
return True
|
||||
|
||||
|
||||
def umount(dest: str, lazy=False) -> subprocess.CompletedProcess:
|
||||
def umount(dest: str, lazy=False):
|
||||
return run_root_cmd(
|
||||
[
|
||||
'umount',
|
||||
@@ -63,7 +55,7 @@ def mount(src: str, dest: str, options: list[str] = ['bind'], fs_type: Optional[
|
||||
return result
|
||||
|
||||
|
||||
def check_findmnt(path: str) -> subprocess.CompletedProcess:
|
||||
def check_findmnt(path: str):
|
||||
result = run_root_cmd(
|
||||
[
|
||||
'findmnt',
|
||||
@@ -77,27 +69,12 @@ def check_findmnt(path: str) -> subprocess.CompletedProcess:
|
||||
return result.stdout.decode().strip()
|
||||


def git(
cmd: list[str],
dir: Optional[str] = None,
use_git_dir: bool = False,
git_dir: str = './.git',
capture_output=False,
user: Optional[str] = None,
) -> subprocess.CompletedProcess:
dirarg = [f'--git-dir={git_dir}'] if use_git_dir else []
result = run_cmd(['git', *dirarg] + cmd, cwd=dir, capture_output=capture_output, switch_user=user)
def git(cmd: list[str], dir='.', capture_output=False, user: Optional[str] = None) -> subprocess.CompletedProcess:
result = run_cmd(['git'] + cmd, cwd=dir, capture_output=capture_output, switch_user=user)
assert isinstance(result, subprocess.CompletedProcess)
return result


def git_get_branch(path, use_git_dir: bool = True, git_dir='./.git') -> str:
result = git(['branch', '--show-current'], dir=path, use_git_dir=True, git_dir=git_dir, capture_output=True)
if result.returncode:
raise Exception(f'Error getting git branch for {path}: {result.stderr}')
return result.stdout.decode().strip()


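A short, hedged sketch of how the two git helpers above compose; the target path and the fetch command are only illustrative, not taken from the diff.

# e.g. query the branch of the PKGBUILDs checkout without changing into it:
# pkgbuilds_dir = config.get_path('pkgbuilds')
# current_branch = git_get_branch(pkgbuilds_dir)
# git(['fetch', 'origin'], dir=pkgbuilds_dir)  # plain git() call for comparison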
def log_or_exception(raise_exception: bool, msg: str, exc_class=Exception, log_level=logging.WARNING):
|
||||
if raise_exception:
|
||||
raise exc_class(msg)
|
||||
@@ -127,117 +104,3 @@ def get_gid(group: Union[int, str]) -> int:
|
||||
if isinstance(group, int) or group.isnumeric():
|
||||
return int(group)
|
||||
return grp.getgrnam(group).gr_gid
|
||||
|
||||
|
||||
def read_files_from_tar(tar_file: str, files: Sequence[str]) -> Generator[tuple[str, IO], None, None]:
|
||||
assert os.path.exists(tar_file)
|
||||
with tarfile.open(tar_file) as index:
|
||||
for path in files:
|
||||
fd = index.extractfile(index.getmember(path))
|
||||
assert fd
|
||||
yield path, fd
|
||||
|
||||
|
||||
def download_file(path: str, url: str, update: bool = True):
|
||||
"""Download a file over http[s]. With `update`, tries to use mtime timestamps to download only changed files."""
|
||||
url_time = None
|
||||
if os.path.exists(path) and update:
|
||||
headers = requests.head(url).headers
|
||||
file_size = os.path.getsize(path)
|
||||
missing = [i for i in ['Content-Length', 'last-modified'] if i not in headers]
|
||||
if missing:
|
||||
logging.debug(f"Headers not specified: {missing}")
|
||||
if 'Content-Length' in headers and int(headers['Content-Length']) != file_size:
|
||||
logging.debug(f"{path} size differs: local: {file_size}, http: {headers['Content-Length']}")
|
||||
elif 'last-modified' in headers:
|
||||
url_time = parsedate(headers['last-modified']).astimezone()
|
||||
file_time = datetime.datetime.fromtimestamp(os.path.getmtime(path)).astimezone()
|
||||
if url_time == file_time:
|
||||
logging.debug(f"{path} seems already up to date")
|
||||
return False
|
||||
user_agent = {"User-agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:46.0) Gecko/20100101 Firefox/46.0"}
|
||||
download = requests.get(url, headers=user_agent)
|
||||
with open(path, 'wb') as fd:
|
||||
for chunk in download.iter_content(4096):
|
||||
fd.write(chunk)
|
||||
if 'last-modified' in download.headers:
|
||||
url_time = parsedate(download.headers['last-modified']).astimezone()
|
||||
os.utime(path, (datetime.datetime.now().timestamp(), url_time.timestamp()))
|
||||
logging.debug(f"{path} downloaded!")
|
||||
return True
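A minimal usage sketch for download_file(); the URL and local path are invented for the example.

# Hypothetical call: re-downloads only if the size or last-modified headers indicate a change.
# changed = download_file('/tmp/repo.db', 'https://example.com/aarch64/repo.db')
# if changed:
#     logging.info("repo database was updated")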


# stackoverflow magic from https://stackoverflow.com/a/44873382
def sha256sum(filename):
h = hashlib.sha256()
b = bytearray(128 * 1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
while n := f.readinto(mv):
h.update(mv[:n])
return h.hexdigest()


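A small sketch of how the chunked helper above is typically used; the file name is invented and the comparison only restates what the code does.

# Reads the file in 128 KiB chunks, so memory stays bounded, but the digest is
# the same as hashing the whole file at once:
# digest = sha256sum('PKGBUILD')
# assert digest == hashlib.sha256(open('PKGBUILD', 'rb').read()).hexdigest()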
def ellipsize(s: str, length: int = 25, padding: Optional[str] = None, ellipsis: str = '...', rjust: bool = False):
"""
Ellipsize `s`, shortening it to `(length - len(ellipsis))` and appending `ellipsis` if `s` is longer than `length`.
If `padding` is non-empty and `s` is shorter than length, `s` is padded with `padding` until it's `length` long.
"""
if len(s) > length:
return s[:length - len(ellipsis)] + ellipsis
if not padding:
return s
pad = s.rjust if rjust else s.ljust
return pad(length, padding)


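Illustrative expected behaviour of ellipsize(), derived from the code above; the input strings are made up.

# ellipsize('kupferbootstrap-devtools', length=10)    -> 'kupferb...'
# ellipsize('abc', length=6, padding=' ')             -> 'abc   '
# ellipsize('abc', length=6, padding=' ', rjust=True) -> '   abc'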
def colors_supported(force_colors: Optional[bool] = None) -> bool:
|
||||
"If force_colors is None, returns isatty(stdout)"
|
||||
# stdout is fd 1
|
||||
return force_colors if force_colors is not None else os.isatty(1)
|
||||
|
||||
|
||||
def color_str(s: str, use_colors: Optional[bool] = None, **kwargs) -> str:
|
||||
if colors_supported(use_colors):
|
||||
return click.style(s, **kwargs)
|
||||
return s
|
||||
|
||||
|
||||
def color_green(s: str, **kwargs):
|
||||
return color_str(s, fg="bright_green", **kwargs)
|
||||
|
||||
|
||||
def color_bold(s: str, **kwargs):
|
||||
return color_str(s, bold=True, **kwargs)
|
||||
|
||||
|
||||
def color_mark_selected(
|
||||
item: str,
|
||||
profile_name: str,
|
||||
inherited_from: Optional[str] = None,
|
||||
msg_fmt: str = 'Currently selected by profile "%s"%s',
|
||||
msg_item_colors: dict[str, Any] = dict(bold=True, fg="bright_green"),
|
||||
marker: str = '>>> ',
|
||||
marker_config: dict[str, Any] = dict(bold=True, fg="bright_green"),
|
||||
split_on: str = '\n',
|
||||
suffix: str = '\n\n',
|
||||
use_colors: Optional[bool] = None,
|
||||
) -> str:
|
||||
|
||||
def bold(s: str, _bold=True, **kwargs):
|
||||
return color_bold(s, use_colors=use_colors, **kwargs)
|
||||
|
||||
def green(s: str, **kwargs):
|
||||
return color_green(s, use_colors=use_colors, **kwargs)
|
||||
|
||||
marker_full = color_str(marker, use_colors=use_colors, **marker_config)
|
||||
|
||||
msg_items = [color_str(profile_name, use_colors=use_colors, **msg_item_colors), '']
|
||||
if inherited_from and inherited_from != profile_name:
|
||||
msg_items[1] = ''.join([
|
||||
bold(' (inherited from profile "'),
|
||||
green(inherited_from, bold=True),
|
||||
bold('")'),
|
||||
])
|
||||
output = f'{item}{suffix}{msg_fmt % tuple(msg_items)}'
|
||||
return '\n'.join([(marker_full + o) for o in output.split(split_on)])
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import click
|
||||
import logging
|
||||
|
||||
from typing import Optional, Sequence, Union
|
||||
from typing import Sequence, Union
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import Arch
|
||||
from utils import programs_available
|
||||
from .docker import DockerWrapper
|
||||
@@ -14,33 +14,27 @@ wrapper_impls: dict[str, Wrapper] = {
|
||||
}
|
||||
|
||||
|
||||
def get_wrapper_type(wrapper_type: Optional[str] = None) -> str:
|
||||
return wrapper_type or config.file.wrapper.type
|
||||
def get_wrapper_type(wrapper_type: str = None):
|
||||
return wrapper_type or config.file['wrapper']['type']
|
||||
|
||||
|
||||
def get_wrapper_impl(wrapper_type: Optional[str] = None) -> Wrapper:
|
||||
def get_wrapper_impl(wrapper_type: str = None) -> Wrapper:
|
||||
return wrapper_impls[get_wrapper_type(wrapper_type)]
|
||||
|
||||
|
||||
def wrap(wrapper_type: Optional[str] = None):
|
||||
def wrap(wrapper_type: str = None):
|
||||
wrapper_type = get_wrapper_type(wrapper_type)
|
||||
if wrapper_type != 'none':
|
||||
get_wrapper_impl(wrapper_type).wrap()
|
||||
|
||||
|
||||
def is_wrapped(wrapper_type: Optional[str] = None) -> bool:
|
||||
wrapper_type = get_wrapper_type(wrapper_type)
|
||||
return wrapper_type != 'none' and get_wrapper_impl(wrapper_type).is_wrapped()
|
||||
|
||||
|
||||
def needs_wrap(wrapper_type: Optional[str] = None) -> bool:
|
||||
wrapper_type = wrapper_type or get_wrapper_type()
|
||||
return wrapper_type != 'none' and not is_wrapped(wrapper_type) and not config.runtime.no_wrap
|
||||
def is_wrapped(wrapper_type: str = None):
|
||||
return get_wrapper_impl(wrapper_type).is_wrapped()
|
||||
|
||||
|
||||
def enforce_wrap(no_wrapper=False):
|
||||
wrapper_type = get_wrapper_type()
|
||||
if needs_wrap(wrapper_type) and not no_wrapper:
|
||||
if wrapper_type != 'none' and not is_wrapped(wrapper_type) and not config.runtime['no_wrap'] and not no_wrapper:
|
||||
logging.info(f'Wrapping in {wrapper_type}')
|
||||
wrap()
|
||||
|
||||
@@ -56,26 +50,6 @@ def wrap_if_foreign_arch(arch: Arch):
|
||||
enforce_wrap()
|
||||
|
||||
|
||||
def execute_without_exit(f, argv_override: Optional[list[str]], *args, **kwargs):
|
||||
"""If no wrap is needed, executes and returns f(*args, **kwargs).
|
||||
If a wrap is determined to be necessary, force a wrap with argv_override applied.
|
||||
If a wrap was forced, None is returned.
|
||||
WARNING: No protection against f() returning None is taken."""
|
||||
if not needs_wrap():
|
||||
return f(*args, **kwargs)
|
||||
assert get_wrapper_type() != 'none', "needs_wrap() should've returned False"
|
||||
w = get_wrapper_impl()
|
||||
w_cmd = w.argv_override
|
||||
# we need to avoid throwing and catching SystemExit due to FDs getting closed otherwise
|
||||
w_should_exit = w.should_exit
|
||||
w.argv_override = argv_override
|
||||
w.should_exit = False
|
||||
w.wrap()
|
||||
w.argv_override = w_cmd
|
||||
w.should_exit = w_should_exit
|
||||
return None
|
||||
|
||||
|
||||
nowrapper_option = click.option(
|
||||
'-w/-W',
|
||||
'--force-wrapper/--no-wrapper',
|
||||
|
||||
@@ -4,12 +4,13 @@ import pathlib
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from config.state import config
|
||||
from config import config
|
||||
from constants import CHROOT_PATHS
|
||||
from exec.file import makedir
|
||||
|
||||
from .wrapper import Wrapper, WRAPPER_PATHS
|
||||
from .wrapper import BaseWrapper
|
||||
|
||||
DOCKER_PATHS = WRAPPER_PATHS.copy()
|
||||
DOCKER_PATHS = CHROOT_PATHS.copy()
|
||||
|
||||
|
||||
def docker_volumes_args(volume_mappings: dict[str, str]) -> list[str]:
|
||||
@@ -19,13 +20,11 @@ def docker_volumes_args(volume_mappings: dict[str, str]) -> list[str]:
|
||||
return result
|
||||
|
||||
|
||||
class DockerWrapper(Wrapper):
|
||||
class DockerWrapper(BaseWrapper):
|
||||
type: str = 'docker'
|
||||
|
||||
def wrap(self):
|
||||
super().wrap()
|
||||
script_path = config.runtime.script_source_dir
|
||||
assert script_path
|
||||
script_path = config.runtime['script_source_dir']
|
||||
with open(os.path.join(script_path, 'version.txt')) as version_file:
|
||||
version = version_file.read().replace('\n', '')
|
||||
tag = f'registry.gitlab.com/kupfer/kupferbootstrap:{version}'
|
||||
@@ -37,17 +36,11 @@ class DockerWrapper(Wrapper):
|
||||
'.',
|
||||
'-t',
|
||||
tag,
|
||||
] + (['-q'] if not config.runtime.verbose else [])
|
||||
] + (['-q'] if not config.runtime['verbose'] else [])
|
||||
logging.debug('Running docker cmd: ' + ' '.join(cmd))
|
||||
mute_docker = not config.runtime.verbose
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
cwd=script_path,
|
||||
capture_output=mute_docker,
|
||||
)
|
||||
result = subprocess.run(cmd, cwd=script_path, capture_output=True)
|
||||
if result.returncode != 0:
|
||||
error_msg = ('\n' + result.stderr.decode() + '\n') if mute_docker else ''
|
||||
logging.fatal(f'Docker error: {error_msg}Failed to build docker image: see errors above: ^^^^')
|
||||
logging.fatal('Failed to build docker image:\n' + result.stderr.decode())
|
||||
exit(1)
|
||||
else:
|
||||
# Check if the image for the version already exists
|
||||
@@ -71,13 +64,11 @@ class DockerWrapper(Wrapper):
|
||||
|
||||
wrapped_config = self.generate_wrapper_config()
|
||||
|
||||
target_user = 'root' if config.runtime.uid == 0 else 'kupfer'
|
||||
target_home = '/root' if target_user == 'root' else f'/home/{target_user}'
|
||||
|
||||
ssh_dir = os.path.join(pathlib.Path.home(), '.ssh')
|
||||
if not os.path.exists(ssh_dir):
|
||||
os.makedirs(ssh_dir, mode=0o700)
|
||||
volumes = self.get_bind_mounts_default(wrapped_config, ssh_dir=ssh_dir, target_home=target_home)
|
||||
|
||||
volumes = self.get_bind_mounts_default(wrapped_config)
|
||||
for vol_name, vol_dest in DOCKER_PATHS.items():
|
||||
vol_src = config.get_path(vol_name)
|
||||
makedir(vol_src)
|
||||
@@ -93,21 +84,13 @@ class DockerWrapper(Wrapper):
|
||||
'--privileged',
|
||||
] + docker_volumes_args(volumes) + [tag]
|
||||
|
||||
kupfer_cmd = [
|
||||
'kupferbootstrap',
|
||||
'--config',
|
||||
volumes[wrapped_config],
|
||||
]
|
||||
kupfer_cmd += self.argv_override or self.filter_args_wrapper(sys.argv[1:])
|
||||
if config.runtime.uid:
|
||||
kupfer_cmd = ['wrapper_su_helper', '--uid', str(config.runtime.uid), '--username', 'kupfer', '--'] + kupfer_cmd
|
||||
kupfer_cmd = ['kupferbootstrap', '--config', '/root/.config/kupfer/kupferbootstrap.toml'] + self.filter_args_wrapper(sys.argv[1:])
|
||||
|
||||
cmd = docker_cmd + kupfer_cmd
|
||||
logging.debug('Wrapping in docker:' + repr(cmd))
|
||||
result = subprocess.run(cmd)
|
||||
if self.should_exit:
|
||||
exit(result.returncode)
|
||||
return result.returncode
|
||||
|
||||
exit(result.returncode)

def stop(self):
subprocess.run(

Some files were not shown because too many files have changed in this diff.