Compare commits

...

441 Commits

Author SHA1 Message Date
raihan2000
041b2d7dc5 integration_tests.py: add kbs.main in import 2024-10-20 20:49:47 +05:30
raihan2000
7560af89f0 kbs.chroot.device: add cast to satisfy typechecker 2024-10-20 20:21:19 +05:30
raihan2000
31cd87cea7 kbs.net.ssh: add definition Chroot 2024-10-19 19:41:34 +05:30
InsanePrawn
e55fa49f31 kupferbootstrap: add typed marker and convert non-relative imports 2024-10-07 20:01:17 +02:00
InsanePrawn
d71a33bb33 docs: WIP 2024-10-07 20:01:17 +02:00
InsanePrawn
3ffe702301 WIP CHANGES 2024-10-07 20:01:17 +02:00
InsanePrawn
5e10565c70 gitlab-ci: add sanity check for docker images by running kbs --help 2024-10-07 20:01:17 +02:00
InsanePrawn
49bae9d1f9 integration_tests: adapt to pip installation 2024-10-07 20:01:17 +02:00
InsanePrawn
a193a3f6b6 fix docker wrapper 2024-10-07 20:01:17 +02:00
InsanePrawn
6419a29dd0 docker: install kbs via pip 2024-10-07 20:01:17 +02:00
InsanePrawn
93685499fa gitlab-ci: update to install kbs via pip 2024-10-07 20:01:17 +02:00
InsanePrawn
2edea4ca5c make kupferbootstrap package installable 2024-10-07 20:01:17 +02:00
InsanePrawn
d3b8452146 fix imports with sed 2024-10-07 20:01:15 +02:00
InsanePrawn
adeec7a6e3 move kbs library files to src/ 2024-10-07 19:59:08 +02:00
InsanePrawn
a28550825f image/image: tolerate pub-key copying to fail during image build 2024-03-23 17:53:59 +00:00
InsanePrawn
a176fad05a net/ssh: copy_ssh_keys(): pass chroot for uid resolution 2024-03-23 17:53:59 +00:00
InsanePrawn
a4cfc3c3e5 exec/file: makedir(): add mode=None arg 2024-03-23 17:53:59 +00:00
Syboxez Blank
cebac83186 packages/pkgbuild: parse_pkgbuild(): Reuse pkgbase's makedepends as dependencies
Authored-by: InsanePrawn <insane.prawny@gmail.com>
2024-03-23 17:48:38 +00:00
InsanePrawn
f05de7738a integration_tests: test importing main.cli 2024-01-08 04:25:42 +01:00
InsanePrawn
b006cd8f4d packages/pkgbuild: support new key "_crossdirect" to enable/disable crossdirect for single packages 2024-01-08 03:04:43 +01:00
InsanePrawn
4b2150940d packages/build: use copy && remove_file() instead of shutil.move() 2023-12-22 05:07:55 +01:00
InsanePrawn
eaac9195ea packages/build: build_enable_qemu_binfmt(): also build gcc package if available 2023-12-20 03:36:13 +01:00
InsanePrawn
c074fbe42c packages/pkgbuild: parse_pkgbuild(): inherit depends, makedepends, provides, replaces from pkgbase unless overridden 2023-12-20 03:33:28 +01:00
InsanePrawn
a75f32b4b1 chroot/build: mount_crossdirect(): fix symlink creation if link exists 2023-12-20 01:56:27 +01:00
InsanePrawn
4cce7e57ae constants: use ALARM's aarch64 gcc that we package 2023-12-20 01:56:27 +01:00
InsanePrawn
95147cecea docs: convert absolute links to relative 2023-12-19 23:34:33 +01:00
InsanePrawn
ff8a529690 docs: move usage guides to usage/, add quickstart and porting 2023-12-11 17:13:11 +01:00
InsanePrawn
2e504b7b00 dictscheme: fix type hinting 2023-12-11 12:53:03 +01:00
InsanePrawn
a0c4036390 packages: try_download_package(): check pacman cache if file in db but doesn't exist in db folder 2023-12-11 12:49:17 +01:00
InsanePrawn
4c5fe2cb1c config/cli: prompt_choice(): fix change detection logical inversion 2023-08-29 02:13:41 +02:00
Hacker1245
6bcd132b53 docs: update device names in profiles 2023-07-12 18:23:30 +00:00
InsanePrawn
c70b52e5c1 utils: color_mark_selected: fix msg_items tuple size to 2 2023-07-09 03:21:33 +02:00
InsanePrawn
60b38d895c devices, flavours, config: add "inherited from ABC" to "selected by XYZ" output 2023-06-25 16:04:35 +02:00
InsanePrawn
7425356f10 devices/device: extract sanitize_device_name() into own function 2023-06-25 16:04:00 +02:00
InsanePrawn
bfce7c466d utils: add color_mark_selected() to add ">>> selected by profile" msgs 2023-06-25 16:03:08 +02:00
InsanePrawn
16f351a41c docker, gitlab-ci: use pip --break-system-packages until we figure out pip packaging 2023-06-25 04:03:03 +02:00
InsanePrawn
8376725652 test_requirements.txt: add formatters and mypy for easier development 2023-06-25 04:03:03 +02:00
InsanePrawn
5b2f36c74d config/cli: highlight currently selected devices 2023-06-25 04:03:03 +02:00
InsanePrawn
0951865868 config/profile: add resolve_profile_attr() 2023-06-25 04:03:03 +02:00
InsanePrawn
e6f4a68c6b utils: add color_mark_selected() 2023-06-25 04:03:03 +02:00
InsanePrawn
1374e2be74 wrapper/docker: fix logging of docker build failures 2023-06-25 04:03:03 +02:00
InsanePrawn
9bd2bd46a9 wrapper/wrapper: handle unset self.wrapped_config_path 2023-06-25 04:03:03 +02:00
InsanePrawn
8b0ca115a7 config/cli: prompt_profile_{flavour,device}: improve device/flavour printing and wrapper-mode warning 2023-06-25 00:19:09 +02:00
InsanePrawn
fc690eca8a packages: build_enable_qemu_binfmt(): only show message and enable when not already active 2023-06-14 09:24:58 +00:00
InsanePrawn
eb2b0a6c75 binfmt/cli: add cmd_status(), improve logging 2023-06-14 09:24:58 +00:00
InsanePrawn
fd4495dd58 binfmt: improve logging 2023-06-14 09:24:58 +00:00
InsanePrawn
933b7c42ef binfmt: binfmt_ensure_mounted(): use chroot.mount() with chroots 2023-06-14 09:24:58 +00:00
InsanePrawn
c86ce577d1 binfmt: move to own module, add cmd_register(), cmd_unregister() to cli 2023-06-14 09:24:58 +00:00
InsanePrawn
46507f8dbe binfmt: rename {,un}register() to binfmt_{,un}register() 2023-06-14 09:24:58 +00:00
InsanePrawn
0d866c6287 binfmt: pass through chroot properly 2023-06-14 09:24:58 +00:00
InsanePrawn
3c9b96f03f image: rename aboot to abootimg and rootfs to full 2023-06-12 01:10:54 +02:00
InsanePrawn
407d8893a3 image/cli: cmd_flash: improve log messages and order of partitions in CLI help 2023-06-12 01:00:24 +02:00
InsanePrawn
379e951526 packages/cli: cmd_list(): print package mode 2023-06-10 20:19:33 +02:00
InsanePrawn
ad80b3e889 image/flash: give user output while copying image for shrinking 2023-04-30 16:58:56 +02:00
InsanePrawn
efe4bf085d image: shrink_filesystem(): align file end to 4096b
otherwise fastboot seems to get upset
2023-04-30 16:58:56 +02:00
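The alignment fix boils down to rounding the image's end offset up to the next 4096-byte boundary after shrinking; a minimal sketch of the idea (helper names hypothetical, not the actual kupferbootstrap code):

import os

ALIGNMENT = 4096  # fastboot appears to choke on images not aligned to this

def align_up(size: int, alignment: int = ALIGNMENT) -> int:
    # round up to the next multiple of `alignment`
    return (size + alignment - 1) // alignment * alignment

def pad_image(path: str) -> None:
    # grow the file with zeros until its length is 4096-byte aligned
    os.truncate(path, align_up(os.path.getsize(path)))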
InsanePrawn
de76641fa1 image: dump_file_from_image(): try to detect debugfs failure
Try to detect missing file after supposedly dumping it since debugfs doesn't always error out correctly
2023-04-30 16:58:56 +02:00
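Since debugfs can exit 0 even when the dump silently fails, the detection has to inspect the output file itself; roughly (a sketch, argument handling simplified):

import os
import subprocess

def dump_file_from_image(image: str, src: str, dest: str) -> None:
    # debugfs doesn't always set a non-zero exit code on failure,
    # so check that the dumped file actually materialized
    subprocess.run(['debugfs', '-R', f'dump /{src} {dest}', image], check=True)
    if not os.path.exists(dest) or os.path.getsize(dest) == 0:
        raise Exception(f'debugfs failed to dump /{src} from {image}')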
InsanePrawn
edcad72f7a image: use correct deviceinfo value for device sector size 2023-04-30 16:58:56 +02:00
InsanePrawn
33e1214aef image/fastboot: add --confirm option and generalize fastboot_erase{_dtbo,}() 2023-04-30 05:04:57 +02:00
InsanePrawn
4ba5f87f1e image: factor out get_fs_size() from shrink_fs() 2023-04-30 00:00:32 +02:00
InsanePrawn
3ac8fc0689 image/flash: actually use --split-size 2023-04-26 14:21:51 +02:00
InsanePrawn
6648a77822 image/cli: add --sector-size option 2023-04-26 14:21:50 +02:00
InsanePrawn
69b7ea9db2 image/flash: implement fastboot rootfs image flashing, add --no-shrink
Use fastboot by default instead of jumpdrive, respecting the deviceinfo
2023-04-26 14:20:37 +02:00
InsanePrawn
8a266f9149 image/fastboot: use exec.cmd.run_cmd() for loggability 2023-04-26 14:20:37 +02:00
InsanePrawn
604f123067 image/fastboot: flash_image(): add optional sparse_size parameter 2023-04-26 14:20:37 +02:00
InsanePrawn
08285a7931 packages/pkgbuild: fix null deref in __repr__() 2023-04-24 17:15:51 +02:00
InsanePrawn
68154467f3 distro/repo_config: reformat with yapf 0.33 2023-04-24 15:34:02 +02:00
InsanePrawn
dbc512ee3f packages/cli: cmd_check(): add noextract PKGBUILD field 2023-04-24 01:40:10 +02:00
InsanePrawn
7945a4756f distro/repo: use persistent dir for repo db if RemoteRepo.cache_repo_db == True 2023-04-17 05:32:12 +02:00
InsanePrawn
fd2abd3805 exec/file: chmod(): add privileged=True, use False for get_temp_dir() 2023-04-17 04:49:29 +02:00
InsanePrawn
44eaf0d767 utils: add content-size to download_file 2023-04-17 04:49:29 +02:00
InsanePrawn
74a7aeb668 packages/cli: cmd_update(): add enforce_wrap() 2023-04-17 04:49:29 +02:00
InsanePrawn
acee95a003 dictscheme: rename from dataclass as it's confusing with builtin dataclasses 2023-04-17 02:37:10 +02:00
InsanePrawn
b84d2202db python 3.9 compat: introduce typehelpers.py for NoneType, UnionType, TypeAlias 2023-04-17 02:37:10 +02:00
InsanePrawn
c357b0a968 wrapper: only run at_exit handler once 2023-04-16 06:00:50 +02:00
InsanePrawn
67590fe12b config/cli: drop obsolete warning when pkgbuilds arent initialised in prompt_profile() 2023-04-16 06:00:50 +02:00
InsanePrawn
cd1d0543fe wrapper: move at_exit handling into wrap() 2023-04-16 05:23:31 +02:00
InsanePrawn
6961cb7f36 gitlab-ci: override docker dind mtu
sigh. -.-
2023-04-16 05:23:31 +02:00
InsanePrawn
eb13a7d093 image/cli: improve help for cmd_inspect() 2023-04-16 05:23:31 +02:00
InsanePrawn
1a695adff4 wrapper/docker: don't suppress docker build stdout when verbose enabled 2023-04-16 05:23:31 +02:00
InsanePrawn
d3cc5e9483 main.py: announce force-enabling wrapper 2023-04-16 04:03:49 +02:00
InsanePrawn
cfd65f9638 gitignore: add kate swap files 2023-04-16 04:03:49 +02:00
InsanePrawn
61b1444360 wrapper_su_helper.py: tolerate non-unique uid 2023-04-16 04:03:49 +02:00
InsanePrawn
4115d6ba00 packages/build: build_package(): source /etc/profile before building so PATH is complete 2023-04-16 04:03:49 +02:00
InsanePrawn
0353693025 exec/cmd: flatten_shell_script(): specifically quote empty strings even when shell_quote is disabled 2023-04-16 04:03:49 +02:00
InsanePrawn
f6fb521c8a packages: build_enable_qemu_binfmt(): don't use is_registered() 2023-04-16 04:03:49 +02:00
InsanePrawn
f113faa201 constants: add kupfer-config --user 2023-04-16 03:42:12 +02:00
InsanePrawn
91d2cd3681 config/cli: use wrapper.execute_without_exit() for prompt_profile_{flavour,device}() to avoid prompting in docker 2023-04-16 03:37:22 +02:00
InsanePrawn
b9969d8feb wrapper: add execute_without_exit() 2023-04-16 03:37:22 +02:00
InsanePrawn
389d44e776 wrapper: add Wrapper.should_exit 2023-04-16 03:37:22 +02:00
InsanePrawn
d2d9cb6c7c wrapper: add Wrapper.argv_override 2023-04-16 03:37:22 +02:00
InsanePrawn
ec0e430c00 config/cli: save main config body separately from profiles to support flavour and device listing 2023-04-16 03:37:22 +02:00
InsanePrawn
954592fc62 config/cli: warn when saving config in container 2023-04-16 03:37:22 +02:00
InsanePrawn
e07306d5c4 wrapper: add needs_wrap(), typehint return values 2023-04-16 03:37:22 +02:00
InsanePrawn
dfd191060a config/state: remove field_name from missing device/flavour hint as it gets used as the profile name 2023-04-16 03:37:22 +02:00
InsanePrawn
13aa258794 distro: use repo_config properly 2023-04-16 03:31:35 +02:00
InsanePrawn
572142bf0b dataclass: replace print spam with decent logging 2023-04-16 03:28:49 +02:00
InsanePrawn
28a5400d48 distro/distro: get_base_distro(): add unsigned=True to keep old default behaviour 2023-03-27 21:27:10 +02:00
InsanePrawn
78f9f31e7a repo_config: add default remote_url=None to BaseDistro 2023-03-27 20:03:47 +02:00
InsanePrawn
9766c70523 fixup! requirements.txt: add yaml 2023-03-27 19:49:14 +02:00
InsanePrawn
49c5ec7fd7 repo_config: assert config non-null 2023-03-27 19:47:20 +02:00
InsanePrawn
3ed0721d52 formatting cleanups 2023-03-27 19:45:35 +02:00
InsanePrawn
8ea1ae98be repo_config: changed detection fixes 2023-03-27 19:41:47 +02:00
InsanePrawn
7fa6f80305 requirements.txt: add yaml 2023-03-27 19:40:46 +02:00
InsanePrawn
b9e044f932 constants: rename 'distro.yaml' to 'distro.yml' 2023-03-27 18:59:55 +02:00
InsanePrawn
f313334a27 distro/repo_config: use correct default remote url for base distros 2023-03-27 18:58:29 +02:00
InsanePrawn
e400361dc1 distro/repo_config: remove unnecessary name attribute from RepoConfig 2023-03-27 18:58:03 +02:00
InsanePrawn
e7fbcb6fbe distro: bugfixes: use get_kupfer_url() in the right place to resolve branch placeholder, default some values 2023-03-27 18:48:23 +02:00
InsanePrawn
f3c53e449b constants: rename %branch% URL marker to %kupfer_branch% and give it its own constant 2023-03-27 18:48:04 +02:00
InsanePrawn
4517201118 repo_config: switch to .yaml 2023-03-27 09:23:16 +02:00
InsanePrawn
d77adf9b21 packages/cli: cmd_init(): also initialise binary repos 2023-03-27 09:19:08 +02:00
InsanePrawn
21c5992bde packages/build: improve packages init logging 2023-03-27 09:19:08 +02:00
InsanePrawn
ff1c31e157 TEMP: WIP: add repo_config
TEMP cause it spams a bunch of prints in dataclass handling
2023-03-27 09:18:57 +02:00
InsanePrawn
72f4d4948e TEMP: override DataClass.ToDict() 2023-03-21 20:53:17 +01:00
InsanePrawn
53ef22d6b8 distro/repo.py: fix accidental global variable 2023-03-21 20:52:54 +01:00
InsanePrawn
2ad4690c0a {devices,flavours}/cli: add --output-file for json dumping 2023-03-17 16:34:20 +01:00
InsanePrawn
161e14a438 distro/repo: scan(): add allow_failure parameter 2023-03-17 16:34:20 +01:00
InsanePrawn
066b6abaaa distro/distro: add scan parameter to get_base_distro() 2023-03-17 16:34:20 +01:00
InsanePrawn
9f5bafab57 distro/package: fix DESC parser 2023-03-17 16:34:20 +01:00
InsanePrawn
272d55b735 devices/cli: add --force-parse-deviceinfo and --download-packages 2023-03-17 16:34:20 +01:00
InsanePrawn
af1d8d1737 flavours/cli: clean up json. (add architectures, flatten flavour_info, etc.) 2023-03-17 16:34:20 +01:00
InsanePrawn
78874a15e6 packages/cli: linter fixes 2023-03-17 16:34:20 +01:00
InsanePrawn
f38fb798bc devices: don't pass config to parse_pkgbuild*() unnecessarily, that's only for multiprocessing 2023-03-17 16:34:20 +01:00
InsanePrawn
de7b597518 logger: add --quiet flag to disable non-error logging 2023-03-17 16:34:20 +01:00
InsanePrawn
f140fa36ce flavours/cli: colorise output, add -j/--json arg 2023-03-17 16:34:20 +01:00
InsanePrawn
69c73e41dd devices/cli: colorize output 2023-03-17 16:34:20 +01:00
InsanePrawn
e269841038 utils: add colors_supported() and color_str() for terminal colors 2023-03-17 16:34:20 +01:00
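A plausible shape for these helpers, using ANSI escape codes (signatures are guesses, not the real utils.py API):

import sys

def colors_supported(force_colors=None) -> bool:
    # honour an explicit override, otherwise colorize only real terminals
    return force_colors if force_colors is not None else sys.stdout.isatty()

def color_str(text: str, code: int = 32, use_colors=None) -> str:
    if not colors_supported(use_colors):
        return text
    return f'\033[{code}m{text}\033[0m'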
InsanePrawn
932e739255 devices/cli: add --json parameter 2023-03-17 16:34:20 +01:00
InsanePrawn
63156776a2 devices/cli: make device list way more readable, add package name and path, mark currently selected 2023-03-17 16:34:20 +01:00
InsanePrawn
5edfac42ce main.py: default colors to isatty(stdout) if force_colors is None 2023-03-17 16:34:20 +01:00
InsanePrawn
00613096d5 config/state: add config.runtime.colors, fill in main.py 2023-03-17 16:34:20 +01:00
InsanePrawn
c4797c709f logger: disable raising exceptions, e.g. when stdout is closed 2023-03-17 16:34:20 +01:00
InsanePrawn
28c68418a6 packages/build: get_unbuilt_package_levels(): use force=True while updating pkgbar 2023-03-17 16:34:20 +01:00
InsanePrawn
cc1b4b3ee2 packages/build: redirect output from stderr to stdout 2023-03-17 16:34:20 +01:00
InsanePrawn
ff3b5e70dd progressbar: add ellipsize() 2023-03-17 16:34:20 +01:00
InsanePrawn
ac25266a00 packages: build: use progress bars for get_unbuilt_pkg_levels() and build_packages() 2023-03-17 16:34:20 +01:00
InsanePrawn
c99463a0f6 progressbar: new module based on enlighten 2023-03-17 16:34:20 +01:00
InsanePrawn
6d6f582b71 exec/cmd: fix up stderr and stdout handling, fix capture_output overwriting env 2023-03-17 16:34:20 +01:00
InsanePrawn
785e41f8b7 logger: add --force-colors/--no-colors cli flag 2023-03-17 16:34:20 +01:00
InsanePrawn
4d03f238bb CI: fix for docker buildx 2023-03-17 16:26:53 +01:00
InsanePrawn
e758e3c343 docs/versions: fix version selector being hidden behind TOC sidebar
also make it scrollable on _really_ small screens

ci-kbs-docs-build-full
2023-01-04 01:21:03 +01:00
InsanePrawn
7955842da9 docs: cache versioned html to tarball for speed and CI uploads 2023-01-04 01:21:00 +01:00
InsanePrawn
c7084895d6 docs: add versions target to produce multi-version docs
ci-kbs-docs-build-full
2022-12-16 05:30:53 +01:00
InsanePrawn
dc59378243 .gitignore: add *.xml 2022-12-15 07:09:08 +01:00
InsanePrawn
cec828553d packages/pkgbuild: track whether pkg is built on a per-architecture basis 2022-12-11 03:02:00 +01:00
InsanePrawn
45eba305cb packages/cli: add --switch-branch to cmd_init 2022-12-11 03:01:55 +01:00
InsanePrawn
60d8cb77ea packages/cli: add .srcinfo_meta.json and .SRCINFO to packages clean 2022-12-10 19:47:32 +01:00
InsanePrawn
6fa717ce64 packages/build: setup_sources(): drop --holdver
Dubious gain with gratis issues
2022-12-10 19:47:32 +01:00
InsanePrawn
795878cfeb packages/cli: add --init-caches/--no-init-caches to cmd_init() and cmd_update() 2022-12-10 19:47:32 +01:00
InsanePrawn
0693792791 exec/cmd: expose [subprocess.]CompletedProcess for easy import in other modules 2022-12-09 05:45:43 +01:00
InsanePrawn
9e81fbf345 packages/pkgbuild: split out get_pkgbuild_dirs() from discover_pkgbuilds() 2022-12-09 03:47:14 +01:00
InsanePrawn
12b414fe79 constants: add SRCINFO_TARBALL_FILE and SRCINFO_TARBALL_URL 2022-12-09 03:47:14 +01:00
InsanePrawn
61a9b53c5f distro/distro: add get_kupfer_url() for resolving the repo branch 2022-12-08 17:51:37 +01:00
InsanePrawn
db4fbc083a utils: add download_file() 2022-12-08 17:51:37 +01:00
InsanePrawn
4112f5a56e packages/build: check_package_version_built(): use local repo db and validate checksum before calling repo-add again 2022-12-08 01:44:33 +01:00
InsanePrawn
c36087308f distro/package: add parsed _desc to package 2022-12-07 16:56:35 +01:00
InsanePrawn
3a44cb6d42 distro/package: BinaryPackage: Track Arch 2022-12-07 15:25:01 +01:00
InsanePrawn
75d6ea8c3c packages/build: build_enable_qemu_binfmt(): use a chroot if not wrapped 2022-12-07 15:05:00 +01:00
InsanePrawn
e33f1a97d5 binfmt: refactor to support optionally working in Chroot 2022-12-07 15:04:14 +01:00
InsanePrawn
162691e4b5 binfmt: unify arch checking, rename is_registered to binfmt_is_registered 2022-12-07 13:49:48 +01:00
InsanePrawn
07ccc26d95 dataclass: support UnionType (A | B) 2022-12-07 13:21:52 +01:00
InsanePrawn
95c2ffd133 utils: type-hint mount helpers 2022-12-07 13:21:42 +01:00
InsanePrawn
97bfc541c5 packages/cli: cmd_check(): allow optional replaces field 2022-11-24 01:48:58 +01:00
InsanePrawn
6821949620 packages/cli: cmd_clean(): clean up .srcinfo_initialised.json, not .srcinfo_meta.json 2022-11-20 19:27:21 +01:00
InsanePrawn
2237b940c4 .gitlab-ci: docs: install the main requirements.txt, treat warnings as errors 2022-11-12 03:31:26 +01:00
InsanePrawn
33b52b2774 gitlab-ci: fix docker job to run on custom runners without overlayfs as well 2022-11-11 17:55:53 +01:00
InsanePrawn
bce4f03c09 gitlab-ci: build docker for non-protected branches too 2022-11-10 02:55:06 +01:00
InsanePrawn
fbd06eded5 docs: convert to markdown with rst2myst 2022-11-10 02:25:05 +01:00
InsanePrawn
a8e8ddc4b4 typecheck: add --check-untyped-defs and fix some associated type errors 2022-11-09 20:19:04 +01:00
InsanePrawn
c46cd0cd4f integration_test: write config file to a temp dir, ensure it gets loaded 2022-11-09 20:19:04 +01:00
InsanePrawn
3eeeafb30f config.state: try_load_file(): clear state.exception after successful load 2022-11-09 20:19:04 +01:00
InsanePrawn
5951c8c296 packages/cli: check for loadable config before wrapping 2022-11-09 20:19:04 +01:00
InsanePrawn
11125e525f config: remove accidental scheme validation circumvention, improve errors, warn on config loading failure in main()
This will fail on values of the wrong type, but still warn about and allow unknown keys.
2022-11-09 20:19:04 +01:00
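In other words, validation became strict about value types while staying lenient about keys; the behaviour is roughly this (a sketch, not the actual DataClass code):

import logging

def validate_config(config: dict, scheme: dict) -> dict:
    for key, value in config.items():
        if key not in scheme:
            # unknown keys are tolerated, but loudly
            logging.warning(f'Unknown config key {key!r}, keeping it anyway')
            continue
        if not isinstance(value, scheme[key]):
            # values of the wrong type are a hard failure
            raise TypeError(f'Config key {key!r}: expected {scheme[key].__name__}, got {type(value).__name__}')
    return config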
InsanePrawn
7d96d05165 dataclass: transform(): format types in error for wrong value type better 2022-11-09 20:19:04 +01:00
InsanePrawn
82a87e7ea9 implement explicit Optional type hints for =None parameters
Thanks https://github.com/hauntsaninja/no_implicit_optional
2022-11-09 20:19:04 +01:00
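That is, implicit-Optional signatures like `def f(x: str = None)` become explicit; for example (function name hypothetical):

from typing import Optional

def get_profile(name: Optional[str] = None) -> str:  # was: name: str = None
    return name or 'default'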
InsanePrawn
669c82a10b constants: switch archlinux x86 mirrors to https://geo.mirror.pkgbuild.com/$repo/os/$arch 2022-11-09 20:19:04 +01:00
InsanePrawn
185894842f packages: clone_pkgbuilds() and friends: add discard_changes arg, use in integration_tests 2022-11-09 20:19:04 +01:00
InsanePrawn
12554ebf18 integration_tests: ensure that failing branch doesn't exist using git ls-remote 2022-11-09 20:19:04 +01:00
InsanePrawn
f127ff7427 packages/build: explain what's going on and print the plan while looking for unbuilts and building 2022-11-09 20:19:04 +01:00
InsanePrawn
447046d24a packages/build: fix up incomplete f-strings 2022-11-09 20:19:04 +01:00
InsanePrawn
ec323ce8d7 packages: make _mode in PKGBUILD optional for building (but not for cmd_check), warn if missing 2022-11-09 20:19:04 +01:00
InsanePrawn
46f1e91f88 packages/build: build_package(): print package path in all info msgs 2022-11-09 20:19:04 +01:00
InsanePrawn
2cff31973e packages/build: use makepkg --nodeps if package.nodeps is set... 2022-11-09 20:19:04 +01:00
InsanePrawn
71e33ab8e1 chroot/abstract: remove leftover print statement 2022-11-09 20:19:04 +01:00
InsanePrawn
b17cf3584a packages/srcinfo_cache: move initialised_for to new .srcinfo_initialised.json file 2022-11-09 20:19:04 +01:00
InsanePrawn
276933036c packages/srcinfo_cache: extract read() and class._read_file() methods into JsonFile 2022-11-09 20:19:04 +01:00
InsanePrawn
931e09c3d3 packages/srcinfo_cache: extract JsonFile class 2022-11-09 20:19:04 +01:00
InsanePrawn
04f9173233 .gitlab-ci.yml: pytest: install rsync and arch-install-scripts for cmd_build() 2022-11-09 20:19:04 +01:00
InsanePrawn
ff8001881a integration_tests: add tests for cmd_build() 2022-11-09 20:19:04 +01:00
InsanePrawn
84d2d40f04 .gitlab-ci.yml: pytest: export KUPFERBOOTSTRAP_WRAPPED=DOCKER 2022-11-09 20:19:04 +01:00
InsanePrawn
402179d2ee devices/test_deviceinfo.py: add test for deviceinfo python parser variant handling 2022-11-09 20:19:04 +01:00
InsanePrawn
3dcaefe35b devices/test_deviceinfo: add test for device variants ("kernels" in pmb) 2022-11-09 20:19:04 +01:00
InsanePrawn
0e3f0b8771 .gitignore: add .coverage* 2022-11-09 20:19:04 +01:00
InsanePrawn
3b7465dccb devices/{device,deviceinfo}: tolerate device variants that live inside split packages 2022-11-09 20:19:04 +01:00
InsanePrawn
22bd6193dd devices: fix up beryllium deprecations copy-paste fail 2022-11-09 20:19:04 +01:00
InsanePrawn
809dfe6a7f packages/build: build_package(): install makedeps for _nodeps=true packages 2022-11-09 20:19:04 +01:00
InsanePrawn
a0c2061c8b pkgbuild.py: track makedepends both in depends and separate variable for _nodeps=true usage 2022-11-09 20:19:04 +01:00
InsanePrawn
dc2d826362 packages: clean up cmd_init 2022-11-09 20:19:04 +01:00
InsanePrawn
14b0a4a666 docs: templates/command.rst: remove unnecessary $cmd.cmd_$cmd import as all cmds are moved to $cmd.cli.cmd_$cmd now. 2022-11-09 20:19:04 +01:00
InsanePrawn
5d0d76fff2 cache: move cache.py to new module cache/cli.py for uniformity 2022-11-09 20:19:04 +01:00
InsanePrawn
50bcd0de63 image: create new module from {image,boot,fastboot,flash}.py 2022-11-09 20:19:04 +01:00
InsanePrawn
afca099ede net: make new module from {forwarding,ssh,telnet}.py 2022-11-09 20:19:04 +01:00
InsanePrawn
6fed749488 docs: Makefile: clean up .buildinfo and .doctrees 2022-11-09 20:19:04 +01:00
InsanePrawn
d1267d4082 chroot/cli: import get_profile_device() properly now that import loop is fixed 2022-11-09 20:19:04 +01:00
InsanePrawn
254d59959b config: add new __init__.py 2022-11-09 20:19:04 +01:00
InsanePrawn
33f4a81981 config: move code from __init__.py into cli.py 2022-11-09 20:19:04 +01:00
InsanePrawn
777b3172d8 move packages/flavour.py to flavours/ 2022-11-09 20:19:04 +01:00
InsanePrawn
ed4226a505 chroot: add empty __init__.py 2022-11-09 20:19:04 +01:00
InsanePrawn
6569fee6a2 chroot: move __init__.py to cli.py 2022-11-09 20:19:04 +01:00
InsanePrawn
827550e972 docs: scan for $module.cli.cmd_$module 2022-11-09 20:19:04 +01:00
InsanePrawn
0e86de1f44 packages/flavours: rename cmd_flavours_list() to cmd_flavours(), add to main cli directly 2022-11-09 20:19:04 +01:00
InsanePrawn
6581e34087 move packages/{device,deviceinfo}.py to devices/ 2022-11-09 20:19:04 +01:00
InsanePrawn
b2112026d2 packages/build: skip packages that were already built this run (e.g. split packages) 2022-11-09 20:19:04 +01:00
InsanePrawn
ebd541e039 packages/build: exclude package names from dependencies, use pkgbase.names() 2022-11-09 20:19:04 +01:00
InsanePrawn
e91a8c796c packages/pkgbuild: parse version specs from dependencies, provides, etc.
handles e.g. "git>=1.0" properly
2022-11-09 20:19:04 +01:00
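Splitting a spec like `git>=1.0` into package name and version constraint just needs the comparison operators tried longest-first; a simplified sketch (the real Pkgbuild parser may differ):

def split_version_spec(dep: str) -> tuple:
    # "git>=1.0" -> ("git", ">=1.0"); bare names come back with an empty spec
    for op in ('>=', '<=', '=', '>', '<'):
        if op in dep:
            name, _, version = dep.partition(op)
            return name, op + version
    return dep, ''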
InsanePrawn
f16ea1684b constants: rename SRCINFO to .SRCINFO 2022-11-09 20:19:04 +01:00
InsanePrawn
9ae721d888 packages/pkgbuild: filter_pkgbuilds(): fix up to_match tracking 2022-11-09 20:19:04 +01:00
InsanePrawn
9fbb2dec46 chroot: remove initialize=False arg from Chroot() 2022-11-09 20:19:04 +01:00
InsanePrawn
3c315d7899 dataclass: specify _type_hints class variable, filter class variables from type hints. 2022-11-09 20:19:04 +01:00
InsanePrawn
746e42a4f6 chroot: clean up the copy_base instance var mess 2022-11-09 20:19:04 +01:00
InsanePrawn
8a31a98946 chroots: clean up get_*chroot() function signatures 2022-11-09 20:19:04 +01:00
InsanePrawn
07a8c3c79a chroot/abstract: clean up 2022-11-09 20:19:04 +01:00
InsanePrawn
4ab3d7ade1 fastboot.py: raise exceptions on failure to boot and flash instead of calling exit(1) 2022-11-09 20:19:04 +01:00
InsanePrawn
c4900cfd00 image.py: factor out and fix up dump_file_from_image() from dump_lk2nd() and friends 2022-11-09 20:19:04 +01:00
InsanePrawn
a3d60e768a constants: remove BOOT_STRATEGIES, read from deviceinfo 2022-11-09 20:19:04 +01:00
InsanePrawn
c50166051d wrapper: Wrapper.generate_wrapper_config(): no need to overwrite wrapper.type, we have the env var for that 2022-11-09 20:19:04 +01:00
InsanePrawn
fccd58c533 wrapper_su_helper.py: inject constants.WRAPPER_ENV_VAR into user env 2022-11-09 20:19:04 +01:00
InsanePrawn
96ead1ec21 wrapper: move env var name to constants.WRAPPER_ENV_VAR 2022-11-09 20:19:04 +01:00
InsanePrawn
c726541967 packages/cli: get rid of overzealous profile device enforcement so --arch can do its job. 2022-11-09 20:19:04 +01:00
InsanePrawn
a5a2668af5 integration_tests: default KBS branch name from CI env vars if kbs_branch comes back empty 2022-11-09 20:19:04 +01:00
InsanePrawn
024075fabd utils.git*(): use --git-dir=./.git to avoid "unsafe directory" issues 2022-11-09 20:19:04 +01:00
InsanePrawn
1275557d71 utils: git_get_branch(): check returncode 2022-11-09 20:19:04 +01:00
InsanePrawn
0c70d6ec59 integration_tests.test_packages_update(): use switch_branch=True and check if branch successfully switched 2022-11-09 20:19:04 +01:00
InsanePrawn
4fc45bf098 packages/cli: cmd_clean(): reword output 2022-11-09 20:19:04 +01:00
InsanePrawn
c12b702383 packages: fix up cmd_update(), {init,clone}_pkgbuilds() 2022-11-09 20:19:04 +01:00
InsanePrawn
d5c5d19c94 .gitlab-ci.yml: add integration_tests.py to pytest 2022-11-09 20:19:04 +01:00
InsanePrawn
c027afd58a add integration_tests.py 2022-11-09 20:19:04 +01:00
InsanePrawn
daff20302a utils: create git_get_branch() from packages.cli 2022-11-09 20:19:04 +01:00
InsanePrawn
f395ef231b packages: clone_pkgbuilds(): use git remote update before git switch 2022-11-09 20:19:04 +01:00
InsanePrawn
56dbd3966c wrapper.is_wrapped(): handle wrapper_type = 'none' 2022-11-09 20:19:04 +01:00
InsanePrawn
b551c89a1c packages: add tests for flavour.py 2022-11-09 20:19:04 +01:00
InsanePrawn
dfb305b362 packages/cli: clean up cmd_list() 2022-11-09 20:19:04 +01:00
InsanePrawn
4320c28c1b packages/cli: clean up wrapping 2022-11-09 20:19:04 +01:00
InsanePrawn
91041ab06d packages/build: build_enable_qemu_binfmt(): don't install packages on unwrapped systems 2022-11-09 20:19:04 +01:00
InsanePrawn
6b4bb27609 packages/pkgbuild: discover_pkgbuilds(): check for makepkg or wrap 2022-11-09 20:19:04 +01:00
InsanePrawn
f016eccc6e image.py: bring back enforce_wrap() 2022-11-09 20:19:04 +01:00
InsanePrawn
99cc0c9845 packages/flavour: add support for optional description in flavourinfo.json 2022-11-09 20:19:04 +01:00
InsanePrawn
f8af06959c cache.py: fix docs, call packages.cmd_clean() for /pkgbuilds 2022-11-09 20:19:04 +01:00
InsanePrawn
5580c48b08 image.cmd_build(): use correct strictness for filter_pkgbuilds() for local vs extra packages 2022-11-09 20:19:04 +01:00
InsanePrawn
c1afc5bb27 constants: split up BASE_PACKAGES into kupfer-specific and base-distro packages 2022-11-09 20:19:04 +01:00
InsanePrawn
8b504142de packages: filter_pkgbuilds(): track which queries were matched and error on incompletely satisfied queries 2022-11-09 20:19:04 +01:00
InsanePrawn
103c18a171 packages/build: build_package(): respect Pkgbuild.nodeps 2022-11-09 20:19:04 +01:00
InsanePrawn
6ddab50e21 packages/pkgbuild: parse _nodeps and cache in srcinfo_cache 2022-11-09 20:19:04 +01:00
InsanePrawn
cac150d11b packages/cli: cmd_check(): reindent and add _nodeps= key 2022-11-09 20:19:04 +01:00
InsanePrawn
939683f079 packages/cli: add packages init as an alias to packages update 2022-11-09 20:19:04 +01:00
InsanePrawn
18eba2dffd constants.py: get rid of now unused FLAVOURS 2022-11-09 20:19:04 +01:00
InsanePrawn
707efe6bbd image: use Flavour.parse_flavourinfo() for rootfs_size 2022-11-09 20:19:04 +01:00
InsanePrawn
eebca29c55 packages/flavour: implement flavourinfo.json parsing 2022-11-09 20:19:04 +01:00
InsanePrawn
3c91abd175 image.py: use new constants.POST_CMDS for post-cmd instead of per-flavour cmd 2022-11-09 20:19:04 +01:00
InsanePrawn
b3e2059196 packages/cli: add help str for cmd_list 2022-11-09 20:19:04 +01:00
InsanePrawn
6b613287bf packages.srcinfo_cache: make dumped json more human-readable with indent=2 2022-11-09 20:19:04 +01:00
InsanePrawn
eb63a6869a binfmt: fix up unnecessary shell and missing quoting at once 2022-11-09 20:19:04 +01:00
InsanePrawn
02e1b75a79 image.py: migrate to packages.flavour.get_profile_flavour() for the flavour name 2022-11-09 20:19:04 +01:00
InsanePrawn
4d86962862 packages: check_package_version_built(): clean up logging a bit 2022-11-09 20:19:04 +01:00
InsanePrawn
7ab4904cbc main.py: always print at least the beginning and end of the stack trace 2022-11-09 20:19:04 +01:00
InsanePrawn
42d7a701fb flash.py,boot.py: use packages.flavours.get_profile_flavour(), add --profile option 2022-11-09 20:19:04 +01:00
InsanePrawn
c0b3b15260 packages: cmd_list_{devices,flavours}(): error on empty results 2022-11-09 20:19:04 +01:00
InsanePrawn
34ffbfb0bf pkgbuild: clone_pkgbuilds(): fix typos 2022-11-09 20:19:04 +01:00
InsanePrawn
ba5cda7cfa config: prompt_profile(): handle uninitialised PKGBUILDs gracefully and warn user 2022-11-09 20:19:04 +01:00
InsanePrawn
72d9bf3a37 config: cmd_profile_init(): make profile name optional, default to config.file.profiles.current 2022-11-09 20:19:04 +01:00
InsanePrawn
771199d932 packages.setup_sources(): log when package version changes after refreshing 2022-11-09 20:19:04 +01:00
InsanePrawn
f705e39ca1 chroot.mount_{ccache,rust}: use config.get_path() to resolve %cache_dir% 2022-11-09 20:19:04 +01:00
InsanePrawn
2757490a8f packages: check_package_version_built(): don't refresh sources by default, default to doing it in get_unbuilt_package_levels() though 2022-11-09 20:19:04 +01:00
InsanePrawn
851b4e7477 packages: build_package(): improve logging so the user has a chance to know what's going on. 2022-11-09 20:19:04 +01:00
InsanePrawn
49f452342e packages: setup_sources(): we need to use a chroot or else the paths in pkgbuilds/$pkg/src/ will be hosed up 2022-11-09 20:19:04 +01:00
InsanePrawn
97ae046f1e packages/build: setup_sources(): remove chroot, add srcinfo cache, use in check_package_built() 2022-11-09 20:19:04 +01:00
InsanePrawn
16b2f1a3c2 packages: Pkgbase.update(): fixups 2022-11-09 20:19:04 +01:00
InsanePrawn
e691afd328 packages/srcinfo_cache: add src_initialized=sha256sum(PKGBUILD) 2022-11-09 20:19:04 +01:00
InsanePrawn
0fdb6f891b dataclass: handle non-DataClass Munches properly 2022-11-09 20:19:04 +01:00
InsanePrawn
b709fd73b9 packages/srcinfo_cache: track whether cache was correct or state had been changed 2022-11-09 20:19:04 +01:00
InsanePrawn
0983d3466d packages.pkgbuild: add Pkgbuild.srcinfo_cache 2022-11-09 20:19:04 +01:00
InsanePrawn
3ea7e98a48 pytest: move coverage flags from .gitlab-ci.yml to pytest.sh, add test_requirements.txt for pip 2022-11-09 20:19:04 +01:00
InsanePrawn
5c8f0acfcd exec: run_cmd() include cwd in debug log 2022-11-09 20:19:04 +01:00
InsanePrawn
f535344351 utils.git(): default cwd to None instead of "." 2022-11-09 20:19:04 +01:00
InsanePrawn
686a62685e packages: parse_pkgbuild(): banish SRCINFO caching into new file srcinfo_cache.py 2022-11-09 20:19:04 +01:00
InsanePrawn
2f8d53648e packages: discover_pkgbuilds: caching fixes and cleanups 2022-11-09 20:19:04 +01:00
InsanePrawn
73dc5a287a packages: discover_pkgbuilds: use threading as backend for Parallel instead of loky for speed
benchmark: `time kupferbootstrap -vW packages list`

uncached srcinfos:
- threading: ~83s
- multiprocessing: ~86s
- loky: ~144s (~2x)

cached srcinfos:
- threading: ~0.6s
- multiprocessing: ~0.8s
- loky: ~50s (~100x)
2022-11-09 20:19:04 +01:00
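The backend switch itself is a single argument to joblib's Parallel; the benchmarked pattern looks roughly like this (worker body elided, n_jobs is a guess):

from joblib import Parallel, delayed

def parse_pkgbuild(path: str):
    ...  # stand-in for the real SRCINFO parsing

def discover_pkgbuilds(paths: list) -> list:
    # 'threading' avoids loky's process spawning and pickling overhead;
    # fine here because the work is dominated by subprocesses and file I/O
    return Parallel(backend='threading', n_jobs=8)(delayed(parse_pkgbuild)(p) for p in paths)

The takeaway from the numbers above: for cached srcinfos, loky's serialization costs dwarf the actual work.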
InsanePrawn
3b8242be19 packages: parse_pkgbuilds: cache makepkg --printsrcinfo output to SRCINFO, tie to PKGBUILD checksum in srcinfo_meta.json 2022-11-09 20:19:04 +01:00
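The caching scheme: rerun `makepkg --printsrcinfo` only when the PKGBUILD's hash no longer matches the one recorded next to the cached output; schematically (simplified, file names as in the commit message):

import hashlib, json, os, subprocess

def sha256sum(path: str) -> str:
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest()

def get_srcinfo(pkg_dir: str) -> str:
    checksum = sha256sum(os.path.join(pkg_dir, 'PKGBUILD'))
    meta_path = os.path.join(pkg_dir, 'srcinfo_meta.json')
    srcinfo_path = os.path.join(pkg_dir, 'SRCINFO')
    if os.path.exists(meta_path) and os.path.exists(srcinfo_path):
        with open(meta_path) as f:
            if json.load(f).get('checksum') == checksum:
                with open(srcinfo_path) as f:
                    return f.read()  # cache hit: PKGBUILD unchanged
    # cache miss: regenerate and remember the checksum it belongs to
    srcinfo = subprocess.run(['makepkg', '--printsrcinfo'], cwd=pkg_dir,
                             capture_output=True, text=True, check=True).stdout
    with open(srcinfo_path, 'w') as f:
        f.write(srcinfo)
    with open(meta_path, 'w') as f:
        json.dump({'checksum': checksum}, f, indent=2)
    return srcinfo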
InsanePrawn
b31160146b utils.py: add sha256sum(filepath) 2022-11-09 20:19:04 +01:00
InsanePrawn
f77aa4f2a2 config: add listings of devices and flavours to config profile init aka prompt_profile() and --no-parse flag
This is slow-ish without SRCINFO caching
2022-11-09 20:19:04 +01:00
InsanePrawn
7b7caf3f37 dataclass: DataClass.transform(): handle Optional dict/Munch items 2022-11-09 20:19:04 +01:00
InsanePrawn
adcdf38c14 packages: add flavour.py: initial implementation for Flavour scanning 2022-11-09 20:19:04 +01:00
InsanePrawn
6f9a013c2e packages: add cmd_devices_list() 2022-11-09 20:19:04 +01:00
InsanePrawn
75c832cbfb packages: get_devices(): use lazy scanning, scan only device repository 2022-11-09 20:19:04 +01:00
InsanePrawn
ddc92012b8 packages: discover_pkgbuilds(): introduce ability for partial scanning by limiting repositories 2022-11-09 20:19:04 +01:00
InsanePrawn
2a20f6c45a config: add Config.enforce_profile_flavour_set() 2022-11-09 20:19:04 +01:00
InsanePrawn
d3cdd64aea packages: improve ux around cli and filter_pkgbuilds() to indicate that the query was empty or wrong 2022-11-09 20:19:04 +01:00
InsanePrawn
bca1e29648 packages/pkgbuild: add pkgdesc description parsing 2022-11-09 20:19:04 +01:00
InsanePrawn
1fd84bb9c2 packages/pkgbuild: add get_pkgbuild_by_name(name, lazy=True) for easy cache access 2022-11-09 20:19:04 +01:00
InsanePrawn
e4320578ef packages: init_pkgbuilds(): add lazy param and behaviour 2022-11-09 20:19:04 +01:00
InsanePrawn
69404a4267 packages/build: check_package_built(): add init_prebuilts(arch) 2022-11-09 20:19:04 +01:00
InsanePrawn
7d803fc5bd config: Config.update(): avoid Munch attr error by calling .toDict() on self.file before comparison with merged 2022-11-09 20:19:04 +01:00
InsanePrawn
dd0d848b8f image.cmd_build(): parse deviceinfo only after building the packages 2022-11-09 20:19:04 +01:00
InsanePrawn
98140565ef packages: parse_pkgbuild(): handle split packages properly 2022-11-09 20:19:04 +01:00
InsanePrawn
ad966d6616 chroot.cmd_chroot(): call image.cmd_inspect() for type='rootfs' and clean up 2022-11-09 20:19:04 +01:00
InsanePrawn
1d0a97560b chroot.mount_pacman_cache(): use /var/cache/pacman/pkg to avoid problems while pacstrapping, adjust pacman.conf generator 2022-11-09 20:19:04 +01:00
InsanePrawn
6c269080d6 constants.py: clean up paths 2022-11-09 20:19:04 +01:00
InsanePrawn
eea8b964e0 boot: use get_profile_device.flash_pagesize for sector_size 2022-11-09 20:19:04 +01:00
InsanePrawn
69bed9fc4e readme: point to online kupfer docs, use full config init instead of non-interactive 2022-11-09 20:19:04 +01:00
InsanePrawn
b80f42d48a packages: get_makepkg_env(): don't use host env, but provide standard PATH variable 2022-11-09 20:19:04 +01:00
InsanePrawn
c3e4b5c108 packages/device: get_device: check if device pkgbuild dir exists upfront and raise clear exception if not instead of asserting 2022-11-09 20:11:20 +01:00
InsanePrawn
ad83d14861 config.enforce_profile_device_set(): default profile_name from profiles.current to output in error msgs if "None" passed 2022-11-09 20:11:20 +01:00
InsanePrawn
927fa352c5 image,flash: get sector size from deviceinfo 2022-11-09 20:11:20 +01:00
InsanePrawn
4f7cb8f516 packages/device: implement Device.parse_deviceinfo(), add deviceinfo tests 2022-11-09 20:11:20 +01:00
InsanePrawn
2c70ad6c12 utils: add read_files_from_tar() 2022-11-09 20:11:20 +01:00
InsanePrawn
81f88dd636 packages: clean up check_package_built() and use RemotePackage.acquire() in try_download_package() 2022-11-09 20:11:20 +01:00
InsanePrawn
a9edbfd07d exec/file: document symlink() and check return code 2022-11-09 20:11:19 +01:00
InsanePrawn
4154b2dfe3 distro.package: add Package.acquire() 2022-11-09 20:11:19 +01:00
InsanePrawn
a2c8868d61 packages/build: add_file_to_repo(): add remove_original=True parameter, clean up add_package_to_repo() 2022-11-09 20:11:19 +01:00
InsanePrawn
57fec8fd91 packages/build: split out init_local_repo() from init_prebuilts(), use in add_file_to_repo() 2022-11-09 20:11:19 +01:00
InsanePrawn
bf420a73be distro: refactor BinaryPackage, Repo and Distro into generics for Local and Remote 2022-11-09 20:11:19 +01:00
InsanePrawn
dbe3dc91a3 distro/package: refactor class PackageInfo into class BinaryPackage(PackageInfo) 2022-11-09 19:39:59 +01:00
InsanePrawn
daa7b68738 packages: split up __init__.py into build.py and cli.py 2022-11-09 19:39:59 +01:00
InsanePrawn
c16147ef0c distro.package.parse_desc(): rename resolved_url param to resolved_repo_url for clarity 2022-11-09 19:39:59 +01:00
InsanePrawn
072ce8e4f0 wrapper/docker: create volume dirs ourselves for better permissions and podman compat 2022-11-09 19:39:59 +01:00
InsanePrawn
69036d008b wrapper/docker: fix indentation (only version needs to be pulled from with open():) 2022-11-09 19:39:59 +01:00
InsanePrawn
040e409620 dataclass.resolve_type_hint(): add conversion from str to [int,float] if str not in types 2022-11-09 19:39:59 +01:00
InsanePrawn
2cd41e75ca config/scheme: move DataClass to dataclass.py 2022-11-09 19:39:59 +01:00
InsanePrawn
76b5b26157 config: DataClass.transform(): add allow_extra=False parameter 2022-11-09 19:39:59 +01:00
InsanePrawn
d7f61f6475 packages: move filter_packages() to pkgbuild, rename to filter_pkgbuilds() 2022-11-09 19:39:59 +01:00
InsanePrawn
003cb7979e constants.py: remove DEVICES array, now comes from pkgbuilds.git 2022-11-09 19:39:59 +01:00
InsanePrawn
5a565662eb image.py: use Device instead of the device name from config 2022-11-09 19:39:59 +01:00
InsanePrawn
606a7a9af3 exec: makedir() accept Union[str, int] for user and group 2022-11-09 19:39:59 +01:00
InsanePrawn
6cce302dcc chroot: add chroot.get_uid(user: str), use in chroot.mount_{ccache,rust} to apply correct ownership 2022-11-09 19:39:59 +01:00
InsanePrawn
035e197f64 wrapper: add WRAPPER_PATHS to point ccache and rust to predictable locations 2022-11-09 19:39:59 +01:00
InsanePrawn
94c9a99e2f requirements.txt: add setuptools required by munch 2022-11-09 19:39:59 +01:00
InsanePrawn
e6718ffc99 Dockerfile: clean up pkgconfig-aarch64 leftover 2022-11-09 19:39:59 +01:00
InsanePrawn
a3ec35bcd6 config: introduce rust cache 2022-11-09 19:39:59 +01:00
InsanePrawn
47e74fb415 config: introduce per-arch persisted ccache dir 2022-11-09 19:39:59 +01:00
InsanePrawn
c0fd1f51b5 chroot: add chroot.mount_chroots() to mount /chroot and use in cmd_chroot() 2022-11-09 19:39:59 +01:00
InsanePrawn
d3e2224b02 pkgbuild.discover_pkgbuilds(): warn and skip directories that don't contain a PKGBUILD 2022-11-09 19:39:59 +01:00
InsanePrawn
5cb747aa26 packages and image: wrap more upfront on missing binaries 2022-11-09 19:39:59 +01:00
InsanePrawn
cdc803031e packages.filter_packages(): only filter by arch if arch is not None 2022-11-09 19:39:59 +01:00
InsanePrawn
54c525c8d9 wrapper_su_helper.py: use su -P to allocate a pseudo-TTY 2022-11-09 19:39:59 +01:00
InsanePrawn
8fc33c85cf packages.cmd_sideload(): fix escape of --overwrite=* 2022-11-09 19:39:59 +01:00
InsanePrawn
32f5fe643f packages: respect package arches before and during building 2022-11-09 19:39:59 +01:00
InsanePrawn
b6239a45ce chroot.create_user(): add primary_group parameter 2022-11-09 19:39:59 +01:00
InsanePrawn
a778f0786e packages.check_package_version_built(): use Pkgbuild.get_filename() instead of running makepkg --packagelist 2022-11-09 19:39:59 +01:00
InsanePrawn
f2ccf06c5b pkgbuild: add get_filename(arch) 2022-11-09 19:39:59 +01:00
InsanePrawn
fb4a12c464 packages.filter_packages(): optionally check package arch 2022-11-09 19:39:59 +01:00
InsanePrawn
bb14c4d779 packages: use user 'kupfer' in chroots for building 2022-11-09 19:39:59 +01:00
InsanePrawn
1cac36b73a chroot: add chroot.add_sudo_config() 2022-11-09 19:39:59 +01:00
InsanePrawn
5329f7a5b0 chroot.create_user(): add optional uid and non_unique parameter 2022-11-09 19:39:59 +01:00
InsanePrawn
17669ea8d2 chroot.run_cmd(): add switch_user parameter 2022-11-09 19:39:59 +01:00
InsanePrawn
2c2e4df638 exec/cmd: generate_cmd_{su,elevated}: tolerate flat string as input for cmd instead of list 2022-11-09 19:39:59 +01:00
InsanePrawn
b622a2ab02 exec.file.write_file(): fix situation where file exists but stat fails due to permissions 2022-11-09 19:39:59 +01:00
InsanePrawn
8ad18c00a6 global: refactor to use config.{file,runtime}.$member instead of config.file["$member"] 2022-11-09 19:39:59 +01:00
InsanePrawn
ba13293b93 DockerWrapper.wrap(): run as config.runtime.uid instead of root 2022-11-02 00:38:18 +01:00
InsanePrawn
a13fdc70e1 config: add config.runtime.uid 2022-11-02 00:38:18 +01:00
InsanePrawn
5e4c038ed0 main: add -w to *enforce* wrapping 2022-11-02 00:38:18 +01:00
InsanePrawn
59028afebf typecheck.sh: show error codes 2022-11-02 00:38:18 +01:00
InsanePrawn
3d6aa98c21 packages: circumvent git dubious ownership errors in pkgbuilds.git due to chrootery 2022-11-02 00:38:18 +01:00
InsanePrawn
2a46e9b361 constants: add QEMU_ARCHES 2022-11-02 00:38:18 +01:00
InsanePrawn
1837069981 constants.py: add armv7h support 2022-11-02 00:38:18 +01:00
InsanePrawn
d03cb39358 Pkgbuild: add refresh_sources() 2022-11-02 00:38:18 +01:00
InsanePrawn
558993b89e packages/pkgbuild: cache parsed pkgbuilds by path, add get_pkgbuild_by_path(), Pkgbuild.update(pkgb) 2022-11-02 00:38:18 +01:00
InsanePrawn
31ab4479c8 Merge tag 'v0.1.4' into prawn/flavours 2022-11-02 00:34:27 +01:00
InsanePrawn
6ed80985a2 Merge branch 'dev' into prawn/flavours 2022-10-13 19:27:14 +02:00
InsanePrawn
2b539f5a5b Merge branch 'dev' into prawn/flavours 2022-08-25 16:53:13 +02:00
InsanePrawn
2db8a0a0cb packages: cleanup unnecessary bits 2022-08-25 13:52:35 +02:00
InsanePrawn
f940fd2301 test_config: remove double import of Profile 2022-08-23 21:43:18 +02:00
InsanePrawn
3952892029 generator.generate_pacman_conf_body(): add in_chroot=False param to use config.get_path('pacman') if necessary 2022-08-23 21:41:41 +02:00
InsanePrawn
5a794ba3dd packages: get_makepkg_env(): actually return env... 2022-08-23 17:38:58 +02:00
InsanePrawn
97d3f05968 gitlab-ci: generate test coverage 2022-08-20 05:59:31 +02:00
InsanePrawn
70c4799385 pytest.sh: generate junit xml 2022-08-20 05:32:52 +02:00
InsanePrawn
c53acbf2f4 .gitlab-ci: run python unit tests as user with passwordless sudo 2022-08-20 05:25:49 +02:00
InsanePrawn
39be2b2fb6 packages/test_device: add test for initialise_pkgbuilds_dir fixture 2022-08-20 04:51:25 +02:00
InsanePrawn
f5e3fa46ad packages/ and image.py: less wrapping, remove hardcoded 'aarch64' where possible 2022-08-20 04:32:33 +02:00
InsanePrawn
657a5fe227 wrapper: add wrap_if_foreign_arch() 2022-08-20 04:32:33 +02:00
InsanePrawn
5b218e64c8 ssh.py: use check_programs_wrap() for ssh and scp binaries 2022-08-20 04:32:33 +02:00
InsanePrawn
27e7fe9a10 utils.programs_available(): add cache 2022-08-20 04:32:33 +02:00
InsanePrawn
8a7f78261f packages: add device.py and initial tests 2022-08-20 04:32:33 +02:00
InsanePrawn
2d13d82943 config/: rework code around parsing profiles, add scheme.SparseProfile to account for the partial profiles in config 2022-08-20 04:32:33 +02:00
InsanePrawn
688f9e2375 config.ConfigStateHolder: initialize self._profile_cache in constructor 2022-08-19 18:12:21 +02:00
InsanePrawn
1c6689f710 packages.cmd_sideload(): fix string escape '\*' -> '\\*' 2022-08-19 17:18:18 +02:00
InsanePrawn
e001d107c2 move packages.{clone_pkgbuilds(),discover_packages(),init_pkgbuilds()} to packages.pkgbuild
also rename discover_packages() to discover_pkgbuilds()
2022-08-19 17:13:03 +02:00
InsanePrawn
5baaaaa180 packages.discover_packages(): clean up caching 2022-08-19 16:55:53 +02:00
InsanePrawn
7d9f1b9ed8 pkgbuild.parse_pkgbuild() make config parameter optional 2022-08-19 16:55:17 +02:00
InsanePrawn
aaef4b7699 config/scheme: type annotate ConfigLoadState class 2022-08-18 05:47:05 +02:00
InsanePrawn
91b44299ae config/scheme.py: fix detection of extra profiles as 'unknown keys' and add unit test using pickle 2022-08-18 05:39:51 +02:00
InsanePrawn
30d9be0950 config: more fixes for scheme.py 2022-08-18 05:39:51 +02:00
InsanePrawn
7eefafc386 config: split up into state.py and profile.py, fixup tests 2022-08-18 03:45:12 +02:00
InsanePrawn
16fd2f1590 config: add data schemas based on munch in scheme.py, add unit tests 2022-08-18 02:44:54 +02:00
InsanePrawn
4298d15178 config/test_config.py: add test for saving modifications 2022-08-18 02:20:17 +02:00
InsanePrawn
5e9b0448dc config: improve tests 2022-08-17 23:24:41 +02:00
InsanePrawn
924f125893 config: add rudimentary tests 2022-08-17 21:20:51 +02:00
InsanePrawn
7ca0e80682 config: make filepath the first argument for ConfigStateHolder 2022-08-17 21:17:23 +02:00
InsanePrawn
7f86c80cec create submodule for config (move config.py) 2022-08-17 20:10:20 +02:00
InsanePrawn
36b321aa2d config.prompt_config(): fix changed detection for lists due to conversion to comma-separated strings 2022-08-17 01:10:06 +02:00
InsanePrawn
e17a69ed81 add shellscript for running pytest, shellcheck *.sh 2022-08-17 00:50:20 +02:00
InsanePrawn
8b9fe661cf image.py: remove iflag=direct from dd argv as it errors out on tmpfs 2022-08-17 00:50:20 +02:00
InsanePrawn
4e4e12b6b9 chroot.run_cmd(): don't blindly import outer env into chroot, avoiding a bunch of spam. 2022-08-17 00:50:20 +02:00
InsanePrawn
5eda60c14d chroot/device.py: Use exec.file.get_temp_dir() for pacman.conf 2022-08-17 00:50:20 +02:00
InsanePrawn
1bf397f29f flash.py: use losetup_destroy() before copying shrunk image 2022-08-17 00:50:20 +02:00
InsanePrawn
216050fbb4 image.py: extract losetup_destroy() for reuse 2022-08-17 00:50:20 +02:00
InsanePrawn
7f9f326861 image.shrink_fs(): no need to run truncate elevated, we own the image file 2022-08-17 00:50:20 +02:00
InsanePrawn
6cfd8ae1c2 flash.py: use exec.file.get_temp_dir() 2022-08-17 00:50:20 +02:00
InsanePrawn
0924ea298a exec/file: add get_temp_dir() (for tempdirs without sticky bits) and chmod() 2022-08-17 00:50:20 +02:00
InsanePrawn
1f15d6705c ssh: make alloc_tty=True default for run_ssh_command() 2022-08-16 15:50:14 +02:00
InsanePrawn
0858a64144 wrapper: add -W shorthand for --no-wrapper 2022-08-16 03:41:26 +02:00
InsanePrawn
916be09c61 distro/repo.py: add Repo.__repr__() 2022-08-16 03:40:28 +02:00
InsanePrawn
4ed0b8626b add exec.file.symlink() and use in BuildChroot 2022-08-16 03:39:29 +02:00
InsanePrawn
859b08df6a packages.build_enable_qemu(): fixup for usage without wrapper, drop buildchroot requirement 2022-08-16 03:38:33 +02:00
InsanePrawn
dd7e1716b8 image.py: cleanups, run umount as root 2022-08-16 02:35:40 +02:00
InsanePrawn
dbf65b44df parse_pkgbuild(): update basepkg version before copying to subpkgs 2022-08-16 02:35:40 +02:00
InsanePrawn
25ea4afe9b chroot/: make devicechroot pacstrap work without docker wrapper 2022-08-16 02:35:40 +02:00
InsanePrawn
707c61f026 replace os.makedirs with exec.{root_,}makedir where applicable 2022-08-16 02:35:40 +02:00
InsanePrawn
818b354000 exec/file.py: add {root_,}makedir() 2022-08-16 02:35:40 +02:00
InsanePrawn
2535d6bbd8 exec/cmd.run_cmd(): add stderr param 2022-08-16 02:35:40 +02:00
InsanePrawn
cc29b60f9f use exec.file.{root_write_file,remove_file} where appropriate 2022-08-16 02:35:40 +02:00
InsanePrawn
9d24065258 ssh.py: fixups, use correct mode for .ssh dir 2022-08-16 00:09:23 +02:00
InsanePrawn
ceedf4bced file.chown() add recursive=False parameter 2022-08-15 23:31:57 +02:00
InsanePrawn
774b526925 config.py: silence new click mypy error 2022-08-15 23:12:59 +02:00
InsanePrawn
107ca5d86e exec/file.py: add new module to write to files as root via cmd.run_root_cmd() 2022-08-15 23:12:59 +02:00
InsanePrawn
4eacee8cad utils: add get_{user,group}_name() and get_{uid,gid}() 2022-08-15 23:12:59 +02:00
InsanePrawn
98b835c75a .gitlab-ci.yml: add pytest */test_*.py 2022-08-15 23:12:59 +02:00
InsanePrawn
f3a1a510d9 exec/cmd.py: add tests (needs configured sudo) 2022-08-15 23:07:44 +02:00
InsanePrawn
879fd113f0 exec: migrate exec.py to exec/cmd.py 2022-08-15 17:48:42 +02:00
InsanePrawn
72ca2258d1 exec.py: add elevation_noop() to refresh sudo timestamp and query for password 2022-08-15 17:48:42 +02:00
InsanePrawn
c562271006 image.py: use exec.run_root_cmd() 2022-08-15 17:48:42 +02:00
InsanePrawn
40600855ec flash.py: use exec.run_root_cmd() 2022-08-15 17:48:42 +02:00
InsanePrawn
b32099c4f1 binfmt.py: use exec.run_root_cmd() 2022-08-15 06:42:19 +02:00
InsanePrawn
fdf03e2b97 packages: migrate to exec.run_(root_)cmd() 2022-08-15 06:41:59 +02:00
InsanePrawn
6593471a8e chroot/*: use exec.run_(root_)cmd 2022-08-15 06:32:43 +02:00
InsanePrawn
0465d1035a forwarding.py and ssh.py: use exec.run_(root_)cmd() 2022-08-15 06:20:56 +02:00
InsanePrawn
7fcd68ced9 packages.discover_packages(): don't use chroot for running makepkg --printsrcinfo, pass config, cache results 2022-08-15 06:17:26 +02:00
InsanePrawn
a6129a82bd format.sh: allow selecting file paths 2022-08-15 06:17:26 +02:00
InsanePrawn
de71a71c13 config.py: remove class members that should be instance members from ConfigStateHolder 2022-08-15 06:17:26 +02:00
InsanePrawn
0d4d83f0ed logger.setup_logging(): add log_setup parameter to disable log output from setup 2022-08-15 06:17:26 +02:00
InsanePrawn
66ac56d715 chroot: extract run_cmd() to new exec.py, use in utils.py and chroot/abstract.py 2022-08-15 06:17:26 +02:00
InsanePrawn
e3ad2edc69 packages/__init__.py: cleanups, mostly logging 2022-08-14 04:17:46 +02:00
InsanePrawn
d70805f3a6 pkgbuild.py: add pkgbuild.arches parsing, add SubPkgbuild class 2022-08-14 04:14:47 +02:00
132 changed files with 9430 additions and 3922 deletions

.dockerignore (new file, +5 lines)

@@ -0,0 +1,5 @@
/venv
/build
__pycache__
.mypy_cache
*.xml

.gitignore (8 changed lines)

@@ -1,2 +1,8 @@
venv/
*.kate-swp
/venv
/build
__pycache__/
.coverage*
*.xml
*.egg-info
dist

.gitlab-ci.yml

@@ -7,14 +7,18 @@ format:
  stage: check
  image: python
  before_script:
    - pip install yapf autoflake
    - python3 -m venv venv
    - venv/bin/pip3 install yapf autoflake
  script:
    - source venv/bin/activate
    - ./format.sh --check
typecheck:
  stage: check
  image: python
  before_script:
    - python3 -m venv venv
    - source venv/bin/activate
    - pip install mypy
  script:
    - ./typecheck.sh --non-interactive --junit-xml mypy-report.xml
@@ -22,39 +26,119 @@ typecheck:
    reports:
      junit: mypy-report.xml
pytest:
  stage: check
  image: archlinux
  before_script:
    - pacman -Sy --noconfirm --needed archlinux-keyring && pacman -Su --noconfirm python python-pip sudo git base-devel arch-install-scripts rsync
    - python3 -m venv venv
    - venv/bin/pip3 install -r test_requirements.txt -r requirements.txt
    - 'echo "kupfer ALL = (ALL) NOPASSWD: ALL" > /etc/sudoers.d/kupfer_all'
    - useradd -m kupfer
    - chmod 777 .
  script:
    - script -e -c 'su kupfer -s /bin/bash -c ". venv/bin/activate && INTEGRATION_TESTS_USE_GLOBAL_CONFIG=TRUE KUPFERBOOTSTRAP_WRAPPED=DOCKER ./pytest.sh --junit-xml=pytest-report.xml --cov-report=xml:coverage.xml integration_tests.py"'
  coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
  artifacts:
    reports:
      junit: pytest-report.xml
      coverage_report:
        coverage_format: cobertura
        path: coverage.xml
build_docker:
  stage: build
  image: docker:latest
  services: ['docker:dind']
  services:
    - name: docker:dind
      command: ["--mtu=1100"] # very low, safe value -.-
  variables:
    DOCKER_TLS_CERTDIR: ""
  script:
    - 'docker build --pull -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_SLUG}" .'
    - 'echo "running sanity check" && docker run -it --rm "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" kupferbootstrap --help'
  only:
    - branches
  except:
    - main
    - dev
push_docker:
  extends: build_docker
  before_script:
    - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin "$CI_REGISTRY"
  script:
    - docker build --pull -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" -t "${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_SLUG}" .
    - !reference [build_docker, script]
    - if [[ "$CI_COMMIT_REF_NAME" == "main" ]]; then docker image tag "${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}" "${CI_REGISTRY_IMAGE}:latest"; fi
    - docker push -a "${CI_REGISTRY_IMAGE}"
  only:
    - main
    - dev
  except:
.docs:
  image: "${CI_REGISTRY_IMAGE}:dev"
  before_script:
    - pip install -r docs/requirements.txt
  script:
    - (cd docs && make)
    - mv docs/html public
  image: "registry.gitlab.com/kupfer/kupferbootstrap:dev"
  variables:
    DOCS_SPHINXARGS: '-W'
    DOCS_MAKE_TARGET: "html"
    DOCS_MAKE_THREADS: 6
  before_script: &docs_before_script
    - python3 -m venv venv
    - source venv/bin/activate
    - pip install -r requirements.txt -r docs/requirements.txt
  script: &docs_script
    - make -C docs -j$DOCS_MAKE_THREADS SPHINXARGS="$DOCS_SPHINXARGS" $DOCS_MAKE_TARGET
    - mv "docs/$DOCS_MAKE_TARGET" public
    - if [[ -e docs/archived ]]; then cp -r docs/archived public/ ; fi
    - rm -vf docs/archived/{main,dev,"$CI_COMMIT_REF_NAME"}.tar.gz # we want to cache only old tags as they won't change
  after_script:
  artifacts:
    paths:
      - public
  cache:
    key: docs
    paths:
      - docs/archived/*.tar.gz
build_docs:
  stage: build
  extends: .docs
  except:
    refs:
      - main
      - dev
      - docs
    variables:
      - '$CI_COMMIT_MESSAGE =~ /ci-kbs-docs-build-full/'
      - '$KBS_DOCS_FULL_BUILD == "1"'
build_docs_all:
  stage: build
  extends: pages
  resource_group: $CI_COMMIT_SHA
  script:
    - (cd docs && make SPHINXARGS="$DOCS_SPHINXARGS -D 'version=$CI_COMMIT_REF_NAME'" && mkdir -p versions && cp -r html versions/$CI_COMMIT_REF_SLUG)
    - *docs_script
  only:
    refs:
      - branches
    variables:
      - '$CI_COMMIT_MESSAGE =~ /ci-kbs-docs-build-full/'
      - '$KBS_DOCS_FULL_BUILD == "1"'
      - '$CI_COMMIT_REF_NAME == "docs"'
  except:
    - main
    - dev
pages:
  stage: deploy
  extends: .docs
  only:
    - main
    - dev
  variables:
    DOCS_MAKE_TARGET: versions
  resource_group: docs
  before_script:
    - git remote update
    - *docs_before_script

Dockerfile

@@ -2,35 +2,30 @@ FROM archlinux:base-devel
RUN pacman-key --init && \
    pacman -Sy --noconfirm archlinux-keyring && \
    pacman -Su --noconfirm \
    pacman -Su --noconfirm --needed \
        python python-pip \
        arch-install-scripts rsync \
        aarch64-linux-gnu-gcc aarch64-linux-gnu-binutils aarch64-linux-gnu-glibc aarch64-linux-gnu-linux-api-headers \
        git \
        git sudo \
        android-tools openssh inetutils \
        parted
RUN sed -i "s/EUID == 0/EUID == -1/g" $(which makepkg)
RUN cd /tmp && \
    git clone https://aur.archlinux.org/aarch64-linux-gnu-pkg-config.git && \
    cd aarch64-linux-gnu-pkg-config && \
    makepkg -s --skippgpcheck && \
    pacman -U --noconfirm *.pkg*
RUN sed -i "s/EUID == 0/EUID == -1/g" "$(which makepkg)"
RUN yes | pacman -Scc
RUN sed -i "s/SigLevel.*/SigLevel = Never/g" /etc/pacman.conf
ENV KUPFERBOOTSTRAP_WRAPPED=DOCKER
ENV PATH=/app/bin:/app/local/bin:$PATH
ENV PATH=/app/bin:/app/local/bin:/app/venv/bin:$PATH
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
RUN python3 -m venv /app/venv
RUN /app/venv/bin/pip3 install -r requirements.txt
RUN python -c "from distro import distro; distro.get_kupfer_local(arch=None,in_chroot=False).repos_config_snippet()" | tee -a /etc/pacman.conf
RUN /app/venv/bin/python3 -c "from kupferbootstrap.distro import distro; distro.get_kupfer_local(arch=None,in_chroot=False).repos_config_snippet()" | tee -a /etc/pacman.conf
RUN useradd -m -g users kupfer
RUN echo "kupfer ALL=(ALL) NOPASSWD: ALL" | tee /etc/sudoers.d/kupfer
WORKDIR /

README.md

@@ -1,16 +1,43 @@
# kupferbootstrap
Kupfer Linux bootstrapping tool - drives pacstrap, makepkg, mkfs and fastboot, just to name a few.
Kupfer Linux bootstrapping tool - drives pacstrap, makepkg, chroot, mkfs and fastboot, just to name a few.
## Documentation
Detailed docs for the main branch are available online at https://kupfer.gitlab.io/kupferbootstrap/
You can also build and view the docs locally:
```sh
cd docs/ && \
make && \
make serve
```
This will run a webserver on localhost:9999. Access it like `firefox http://localhost:9999/`
## Installation
Install Docker, Python 3 with libraries `click`, `appdirs`, `joblib`, `toml`, `typing_extentions`, and `coloredlogs` and put `bin/` into your `PATH`.
Then use `kupferbootstrap`.
0. If you're not on ArchLinux (i.e. don't have `pacman`, `makepkg`, etc. available in your $PATH), install Docker and add yourself to the docker group.
1. Create a python venv: `python3 -m venv venv`
1. Activate it: `source venv/bin/activate`
1. Install KBS: `pip3 install .`
## Usage
1. Initialize config with defaults: `kupferbootstrap config init -N`
1. Configure your device profile: `kupferbootstrap config profile init`
Then run `kupferbootstrap`.
### Pro Tip:
- You can add a shell alias for `$PWD/venv/bin/kupferbootstrap` or create a symlink to it at `/usr/local/bin/kupferbootstrap` for quick access without needing to source the venv activation script every time.
- It is recommended to abbreviate `kupferbootstrap` to `kbs` for even less typing.
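For instance (the paths below assume you cloned to `~/kupferbootstrap` and created the venv there):
```sh
# add to your ~/.bashrc or similar; adjust the path to your checkout
alias kbs="$HOME/kupferbootstrap/venv/bin/kupferbootstrap"
# or create a system-wide symlink instead of an alias:
sudo ln -s "$HOME/kupferbootstrap/venv/bin/kupferbootstrap" /usr/local/bin/kupferbootstrap
```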
## Quickstart
1. Initialize config with defaults, configure your device and flavour: `kupferbootstrap config init`
1. Initialize PKGBUILDs and caches: `kupferbootstrap packages init`
1. Build an image and packages along the way: `kupferbootstrap image build`
## Development
Put `dev` into `version.txt` to always rebuild kupferbootstrap from this directory and use `kupferbootstrap` as normal.
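For example (assuming `version.txt` lives at the top level of this checkout):
```sh
echo dev > version.txt  # always rebuild and use the code in this directory
```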
### Docker
Put `BUILD` (the default) into `docker_version.txt` to always rebuild kupferbootstrap from this directory; otherwise the image is pulled from `registry.gitlab.com/kupfer/kupferbootstrap:$VERSION`, where `$VERSION` is the contents of `docker_version.txt`.
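For example (the tag `v0.1.0` below is purely illustrative):
```sh
echo BUILD > docker_version.txt   # default: rebuild the Docker image from this directory
echo v0.1.0 > docker_version.txt  # or pull registry.gitlab.com/kupfer/kupferbootstrap:v0.1.0 instead
```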


@@ -1,4 +0,0 @@
#!/bin/bash
# shellcheck disable=SC2068
python3 "$(dirname "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")")/main.py" $@


@@ -1,77 +0,0 @@
# modified from pmbootstrap's binfmt.py, Copyright 2018 Oliver Smith, GPL-licensed
import os
import logging
import subprocess
from utils import mount
def binfmt_info():
# Parse the info file
full = {}
info = "/usr/lib/binfmt.d/qemu-static.conf"
logging.debug("parsing: " + info)
with open(info, "r") as handle:
for line in handle:
if line.startswith('#') or ":" not in line:
continue
splitted = line.split(":")
result = {
# _ = splitted[0] # empty
'name': splitted[1],
'type': splitted[2],
'offset': splitted[3],
'magic': splitted[4],
'mask': splitted[5],
'interpreter': splitted[6],
'flags': splitted[7],
'line': line,
}
if not result['name'].startswith('qemu-'):
logging.fatal(f'Unknown binfmt handler "{result["name"]}"')
logging.debug(f'binfmt line: {line}')
continue
arch = ''.join(result['name'].split('-')[1:])
full[arch] = result
return full
def is_registered(arch: str) -> bool:
return os.path.exists("/proc/sys/fs/binfmt_misc/qemu-" + arch)
def register(arch):
if is_registered(arch):
return
lines = binfmt_info()
# Build registration string
# https://en.wikipedia.org/wiki/Binfmt_misc
# :name:type:offset:magic:mask:interpreter:flags
info = lines[arch]
code = info['line']
binfmt = '/proc/sys/fs/binfmt_misc'
register = binfmt + '/register'
if not os.path.exists(register):
logging.info('mounting binfmt_misc')
result = mount('binfmt_misc', binfmt, options=[], fs_type='binfmt_misc')
if result.returncode != 0:
raise Exception(f'Failed mounting binfmt_misc to {binfmt}')
# Register in binfmt_misc
logging.info(f"Registering qemu binfmt ({arch})")
subprocess.run(["sh", "-c", 'echo "' + code + '" > ' + register + ' 2>/dev/null'])
if not is_registered(arch):
logging.debug(f'binfmt line: {code}')
raise Exception(f'Failed to register qemu-user for {arch} with binfmt_misc, {binfmt}/{info["name"]} not found')
def unregister(arch):
binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + arch
if not os.path.exists(binfmt_file):
return
logging.info(f"Unregistering qemu binfmt ({arch})")
subprocess.run(["sh", "-c", "echo -1 > " + binfmt_file])

boot.py

@@ -1,44 +0,0 @@
import os
import urllib.request
import click
from config import config
from constants import BOOT_STRATEGIES, FLASH_PARTS, FASTBOOT, JUMPDRIVE, JUMPDRIVE_VERSION
from fastboot import fastboot_boot, fastboot_erase_dtbo
from image import get_device_and_flavour, losetup_rootfs_image, get_image_path, dump_aboot, dump_lk2nd
from wrapper import enforce_wrap
LK2ND = FLASH_PARTS['LK2ND']
ABOOT = FLASH_PARTS['ABOOT']
TYPES = [LK2ND, JUMPDRIVE, ABOOT]
@click.command(name='boot')
@click.argument('type', required=False, default=ABOOT, type=click.Choice(TYPES))
def cmd_boot(type):
"""Boot JumpDrive or the Kupfer aboot image. Erases Android DTBO in the process."""
enforce_wrap()
device, flavour = get_device_and_flavour()
# TODO: parse arch and sector size
sector_size = 4096
image_path = get_image_path(device, flavour)
strategy = BOOT_STRATEGIES[device]
if strategy == FASTBOOT:
if type == JUMPDRIVE:
file = f'boot-{device}.img'
path = os.path.join(config.get_path('jumpdrive'), file)
os.makedirs(os.path.dirname(path), exist_ok=True)
if not os.path.exists(path):
urllib.request.urlretrieve(f'https://github.com/dreemurrs-embedded/Jumpdrive/releases/download/{JUMPDRIVE_VERSION}/{file}', path)
else:
loop_device = losetup_rootfs_image(image_path, sector_size)
if type == LK2ND:
path = dump_lk2nd(loop_device + 'p1')
elif type == ABOOT:
path = dump_aboot(loop_device + 'p1')
else:
raise Exception(f'Unknown boot image type {type}')
fastboot_erase_dtbo()
fastboot_boot(path)


@@ -1,43 +0,0 @@
import shutil
import click
import os
from config import config
from wrapper import enforce_wrap
import logging
PATHS = ['chroots', 'pacman', 'jumpdrive', 'packages', 'images']
@click.group(name='cache')
def cmd_cache():
"""Clean caches and chroots"""
@cmd_cache.command(name='clean')
@click.option('--force', default=False)
@click.argument('paths', nargs=-1, required=False)
def cmd_clean(paths: list[str], force=False):
if unknown_paths := (set(paths) - set(PATHS + ['all'])):
raise Exception(f"Unknown paths: {' ,'.join(unknown_paths)}")
if 'all' in paths or (not paths and force):
paths = PATHS.copy()
enforce_wrap()
clear = {path: (path in paths) for path in PATHS}
query = not paths
if not query or force:
click.confirm(f'Really clear {", ".join(paths)}?', abort=True)
for path_name in PATHS:
if query:
clear[path_name] = click.confirm(f'Clear {path_name}?')
if clear[path_name]:
logging.info(f'Clearing {path_name}')
dir = config.get_path(path_name)
for file in os.listdir(dir):
path = os.path.join(dir, file)
logging.debug(f'Removing "{path_name}/{file}"')
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)


@@ -1,60 +0,0 @@
import click
import logging
import os
from config import config
from wrapper import enforce_wrap
from .abstract import Chroot
from .base import get_base_chroot
from .build import get_build_chroot, BuildChroot
from .helpers import get_chroot_path
# export Chroot class
Chroot = Chroot
@click.command('chroot')
@click.argument('type', required=False, default='build')
@click.argument('arch', required=False, default=None)
def cmd_chroot(type: str = 'build', arch: str = None, enable_crossdirect=True):
"""Open a shell in a chroot"""
chroot_path = ''
if type not in ['base', 'build', 'rootfs']:
raise Exception('Unknown chroot type: ' + type)
enforce_wrap()
chroot: Chroot
if type == 'rootfs':
if arch:
name = 'rootfs_' + arch
else:
raise Exception('"rootfs" without args not yet implemented, sorry!')
# TODO: name = config.get_profile()[...]
chroot_path = get_chroot_path(name)
if not os.path.exists(chroot_path):
raise Exception(f"rootfs {name} doesn't exist")
else:
if not arch:
# TODO: arch = config.get_profile()[...]
arch = 'aarch64'
if type == 'base':
chroot = get_base_chroot(arch)
if not os.path.exists(chroot.get_path('/bin')):
chroot.initialize()
chroot.initialized = True
elif type == 'build':
build_chroot: BuildChroot = get_build_chroot(arch, activate=True)
chroot = build_chroot # type safety
if not os.path.exists(build_chroot.get_path('/bin')):
build_chroot.initialize()
build_chroot.initialized = True
build_chroot.mount_pkgbuilds()
if config.file['build']['crossdirect'] and enable_crossdirect:
build_chroot.mount_crossdirect()
else:
raise Exception('Really weird bug')
chroot.activate()
logging.debug(f'Starting shell in {chroot.name}:')
chroot.run_cmd('bash', attach_tty=True)

config.py

@@ -1,622 +0,0 @@
import appdirs
import click
import os
import toml
import logging
from copy import deepcopy
from typing import Optional, Union, TypedDict, Any, Mapping
from constants import DEFAULT_PACKAGE_BRANCH
CONFIG_DIR = appdirs.user_config_dir('kupfer')
CACHE_DIR = appdirs.user_cache_dir('kupfer')
CONFIG_DEFAULT_PATH = os.path.join(CONFIG_DIR, 'kupferbootstrap.toml')
class Profile(TypedDict, total=False):
parent: str
device: str
flavour: str
pkgs_include: list[str]
pkgs_exclude: list[str]
hostname: str
username: str
password: Optional[str]
size_extra_mb: Union[str, int]
PROFILE_DEFAULTS: Profile = {
'parent': '',
'device': '',
'flavour': '',
'pkgs_include': [],
'pkgs_exclude': [],
'hostname': 'kupfer',
'username': 'kupfer',
'password': None,
'size_extra_mb': "0",
}
PROFILE_EMPTY: Profile = {key: None for key in PROFILE_DEFAULTS.keys()} # type: ignore
CONFIG_DEFAULTS: dict = {
'wrapper': {
'type': 'docker',
},
'build': {
'ccache': True,
'clean_mode': True,
'crosscompile': True,
'crossdirect': True,
'threads': 0,
},
'pkgbuilds': {
'git_repo': 'https://gitlab.com/kupfer/packages/pkgbuilds.git',
'git_branch': DEFAULT_PACKAGE_BRANCH,
},
'pacman': {
'parallel_downloads': 4,
'check_space': False, # TODO: True causes issues
'repo_branch': DEFAULT_PACKAGE_BRANCH,
},
'paths': {
'cache_dir': CACHE_DIR,
'chroots': os.path.join('%cache_dir%', 'chroots'),
'pacman': os.path.join('%cache_dir%', 'pacman'),
'packages': os.path.join('%cache_dir%', 'packages'),
'pkgbuilds': os.path.join('%cache_dir%', 'pkgbuilds'),
'jumpdrive': os.path.join('%cache_dir%', 'jumpdrive'),
'images': os.path.join('%cache_dir%', 'images'),
},
'profiles': {
'current': 'default',
'default': deepcopy(PROFILE_DEFAULTS),
},
}
CONFIG_SECTIONS = list(CONFIG_DEFAULTS.keys())
CONFIG_RUNTIME_DEFAULTS = {
'verbose': False,
'config_file': None,
'arch': None,
'no_wrap': False,
'script_source_dir': os.path.dirname(os.path.realpath(__file__)),
'error_shell': False,
}
def resolve_path_template(path_template: str, paths: dict[str, str]) -> str:
terminator = '%' # i'll be back
result = path_template
for path_name, path in paths.items():
result = result.replace(terminator + path_name + terminator, path)
return result
def resolve_profile(
name: str,
sparse_profiles: dict[str, Profile],
resolved: dict[str, Profile] = None,
_visited=None,
) -> dict[str, Profile]:
"""
Recursively resolves the specified profile by `name` and its parents to merge the config semantically,
applying include and exclude overrides along the hierarchy.
If `resolved` is passed `None`, a fresh dictionary will be created.
`resolved` will be modified in-place during parsing and also returned.
A sanitized `sparse_profiles` dict is assumed, no checking for unknown keys or incorrect data types is performed.
`_visited` should not be passed by users.
"""
if _visited is None:
_visited = list[str]()
if resolved is None:
resolved = dict[str, Profile]()
if name in _visited:
loop = list(_visited)
raise Exception(f'Dependency loop detected in profiles: {" -> ".join(loop+[loop[0]])}')
if name in resolved:
return resolved
logging.debug(f'Resolving profile {name}')
_visited.append(name)
sparse = sparse_profiles[name]
full = deepcopy(sparse)
if 'parent' in sparse and (parent_name := sparse['parent']):
parent = resolve_profile(name=parent_name, sparse_profiles=sparse_profiles, resolved=resolved, _visited=_visited)[parent_name]
full = parent | sparse
# add up size_extra_mb
if 'size_extra_mb' in sparse:
size = sparse['size_extra_mb']
if isinstance(size, str) and size.startswith('+'):
full['size_extra_mb'] = int(parent.get('size_extra_mb', 0)) + int(size.lstrip('+'))
else:
full['size_extra_mb'] = int(sparse['size_extra_mb'])
# join our includes with parent's
includes = set(parent.get('pkgs_include', []) + sparse.get('pkgs_include', []))
if 'pkgs_exclude' in sparse:
includes -= set(sparse['pkgs_exclude'])
full['pkgs_include'] = list(includes)
# join our excludes with parent's
excludes = set(parent.get('pkgs_exclude', []) + sparse.get('pkgs_exclude', []))
# our includes override parent excludes
if 'pkgs_include' in sparse:
excludes -= set(sparse['pkgs_include'])
full['pkgs_exclude'] = list(excludes)
# now init missing keys
for key, value in PROFILE_DEFAULTS.items():
if key not in full.keys():
full[key] = None # type: ignore[literal-required]
if type(value) == list:
full[key] = [] # type: ignore[literal-required]
full['size_extra_mb'] = int(full['size_extra_mb'] or 0)
resolved[name] = full
return resolved
def sanitize_config(conf: dict[str, dict], warn_missing_defaultprofile=True) -> dict[str, dict]:
"""checks the input config dict for unknown keys and returns only the known parts"""
return merge_configs(conf_new=conf, conf_base={}, warn_missing_defaultprofile=warn_missing_defaultprofile)
def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defaultprofile=True) -> dict[str, dict]:
"""
Returns `conf_new` semantically merged into `conf_base`, after validating
`conf_new` keys against `CONFIG_DEFAULTS` and `PROFILE_DEFAULTS`.
Pass `conf_base={}` to get a sanitized version of `conf_new`.
NOTE: `conf_base` is NOT checked for invalid keys. Sanitize beforehand.
"""
parsed = deepcopy(conf_base)
for outer_name, outer_conf in deepcopy(conf_new).items():
# only handle known config sections
if outer_name not in CONFIG_DEFAULTS.keys():
logging.warning(f'Skipped unknown config section "{outer_name}"')
continue
logging.debug(f'Parsing config section "{outer_name}"')
# check if outer_conf is a dict
if not isinstance(outer_conf, dict):
parsed[outer_name] = outer_conf
else:
# init section
if outer_name not in parsed:
parsed[outer_name] = {}
# profiles need special handling:
# 1. profile names are unknown keys by definition, but we want 'default' to exist
# 2. A profile's subkeys must be compared against PROFILE_DEFAULTS.keys()
if outer_name == 'profiles':
if warn_missing_defaultprofile and 'default' not in outer_conf.keys():
logging.warning('Default profile is not defined in config file')
for profile_name, profile_conf in outer_conf.items():
if not isinstance(profile_conf, dict):
if profile_name == 'current':
parsed[outer_name][profile_name] = profile_conf
else:
logging.warning(f'Skipped key "{profile_name}" in profile section: only subsections and "current" allowed')
continue
# init profile
if profile_name not in parsed[outer_name]:
parsed[outer_name][profile_name] = {}
for key, val in profile_conf.items():
if key not in PROFILE_DEFAULTS:
logging.warning(f'Skipped unknown config item "{key}" in profile "{profile_name}"')
continue
parsed[outer_name][profile_name][key] = val
else:
# handle generic inner config dict
for inner_name, inner_conf in outer_conf.items():
if inner_name not in CONFIG_DEFAULTS[outer_name].keys():
logging.warning(f'Skipped unknown config item "{inner_name}" in "{outer_name}"')
continue
parsed[outer_name][inner_name] = inner_conf
return parsed
def dump_toml(conf) -> str:
return toml.dumps(conf)
def dump_file(file_path: str, config: dict, file_mode: int = 0o600):
def _opener(path, flags):
return os.open(path, flags, file_mode)
conf_dir = os.path.dirname(file_path)
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
old_umask = os.umask(0)
with open(file_path, 'w', opener=_opener) as f:
f.write(dump_toml(conf=config))
os.umask(old_umask)
def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict:
"""
Parse the toml contents of `config_file`, validating keys against `CONFIG_DEFAULTS`.
The parsed results are semantically merged into `base` before returning.
`base` itself is NOT checked for invalid keys.
"""
_conf_file = config_file if config_file is not None else CONFIG_DEFAULT_PATH
logging.debug(f'Trying to load config file: {_conf_file}')
loaded_conf = toml.load(_conf_file)
return merge_configs(conf_new=loaded_conf, conf_base=base)
class ConfigLoadException(Exception):
inner = None
def __init__(self, extra_msg='', inner_exception: Exception = None):
msg: list[str] = ['Config load failed!']
if extra_msg:
msg.append(extra_msg)
if inner_exception:
self.inner = inner_exception
msg.append(str(inner_exception))
super().__init__(self, ' '.join(msg))
class ConfigStateHolder:
class ConfigLoadState:
load_finished = False
exception = None
file_state = ConfigLoadState()
defaults = CONFIG_DEFAULTS
# config options that are persisted to file
file: dict = {}
# runtime config not persisted anywhere
runtime: dict = CONFIG_RUNTIME_DEFAULTS
_profile_cache: dict[str, Profile]
def __init__(self, runtime_conf={}, file_conf_path: Optional[str] = None, file_conf_base: dict = {}):
"""init a stateholder, optionally loading `file_conf_path`"""
self.runtime.update(runtime_conf)
self.runtime['arch'] = os.uname().machine
self.file.update(file_conf_base)
if file_conf_path:
self.try_load_file(file_conf_path)
def try_load_file(self, config_file=None, base=CONFIG_DEFAULTS):
config_file = config_file or CONFIG_DEFAULT_PATH
self.runtime['config_file'] = config_file
self._profile_cache = None
try:
self.file = parse_file(config_file=config_file, base=base)
except Exception as ex:
self.file_state.exception = ex
self.file_state.load_finished = True
def is_loaded(self) -> bool:
return self.file_state.load_finished and self.file_state.exception is None
def enforce_config_loaded(self):
if not self.file_state.load_finished:
raise ConfigLoadException(Exception("Config file wasn't even parsed yet. This is probably a bug in kupferbootstrap :O"))
ex = self.file_state.exception
if ex:
if type(ex) == FileNotFoundError:
ex = Exception("File doesn't exist. Try running `kupferbootstrap config init` first?")
raise ex
def get_profile(self, name: Optional[str] = None) -> Profile:
name = name or self.file['profiles']['current']
self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file['profiles'], resolved=self._profile_cache)
return self._profile_cache[name]
def get_path(self, path_name: str) -> str:
paths = self.file['paths']
return resolve_path_template(paths[path_name], paths)
def get_package_dir(self, arch: str):
return os.path.join(self.get_path('packages'), arch)
def dump(self) -> str:
"""dump toml representation of `self.file`"""
return dump_toml(self.file)
def write(self, path=None):
"""write toml representation of `self.file` to `path`"""
if path is None:
path = self.runtime['config_file']
os.makedirs(os.path.dirname(path), exist_ok=True)
dump_file(path, self.file)
logging.info(f'Created config file at {path}')
def invalidate_profile_cache(self):
"""Clear the profile cache (usually after modification)"""
self._profile_cache = None
def update(self, config_fragment: dict[str, dict], warn_missing_defaultprofile: bool = True) -> bool:
"""Update `self.file` with `config_fragment`. Returns `True` if the config was changed"""
merged = merge_configs(config_fragment, conf_base=self.file, warn_missing_defaultprofile=warn_missing_defaultprofile)
changed = self.file != merged
self.file = merged
if changed and 'profiles' in config_fragment and self.file['profiles'] != config_fragment['profiles']:
self.invalidate_profile_cache()
return changed
def update_profile(self, name: str, profile: Profile, merge: bool = False, create: bool = True, prune: bool = True):
new = {}
if name not in self.file['profiles']:
if not create:
raise Exception(f'Unknown profile: {name}')
else:
if merge:
new = deepcopy(self.file['profiles'][name])
logging.debug(f'new: {new}')
logging.debug(f'profile: {profile}')
new |= profile
if prune:
new = {key: val for key, val in new.items() if val is not None}
self.file['profiles'][name] = new
self.invalidate_profile_cache()
def list_to_comma_str(str_list: list[str], default='') -> str:
if str_list is None:
return default
return ','.join(str_list)
def comma_str_to_list(s: str, default=None) -> list[str]:
if not s:
return default
return [a for a in s.split(',') if a]
def prompt_config(
text: str,
default: Any,
field_type: type = str,
bold: bool = True,
echo_changes: bool = True,
) -> tuple[Any, bool]:
"""
prompts for a new value for a config key. returns the result and a boolean that indicates
whether the result is different, considering empty strings and None equal to each other.
"""
def true_or_zero(to_check) -> bool:
"""returns true if the value is truthy or int(0)"""
zero = 0 # compiler complains about 'is with literal' otherwise
return to_check or to_check is zero # can't do == due to boolean<->int casting
if type(None) == field_type:
field_type = str
if field_type == dict:
raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap')
elif field_type == list:
default = list_to_comma_str(default)
value_conv = comma_str_to_list
else:
value_conv = None
default = '' if default is None else default
if bold:
text = click.style(text, bold=True)
result = click.prompt(text, type=field_type, default=default, value_proc=value_conv, show_default=True)
changed = (result != default) and (true_or_zero(default) or true_or_zero(result))
if changed and echo_changes:
print(f'value changed: "{text}" = "{result}"')
return result, changed
def prompt_profile(name: str, create: bool = True, defaults: Profile = {}) -> tuple[Profile, bool]:
"""Prompts the user for every field in `defaults`. Set values to None for an empty profile."""
profile: Any = PROFILE_EMPTY | defaults
# don't use get_profile() here because we need the sparse profile
if name in config.file['profiles']:
profile |= config.file['profiles'][name]
elif create:
logging.info(f"Profile {name} doesn't exist yet, creating new profile.")
else:
raise Exception(f'Unknown profile "{name}"')
logging.info(f'Configuring profile "{name}"')
changed = False
for key, current in profile.items():
current = profile[key]
text = f'{name}.{key}'
result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key])) # type: ignore
if _changed:
profile[key] = result
changed = True
return profile, changed
def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any:
if not isinstance(config, dict):
raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}")
split_name = name.split('.')
name = split_name[0]
if name not in config:
raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
value = config[name]
if len(split_name) == 1:
return value
else:
rest_name = '.'.join(split_name[1:])
return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')
def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
split_name = name.split('.')
if len(split_name) > 1:
config = config_dot_name_get('.'.join(split_name[:-1]), config)
config[split_name[-1]] = value
def prompt_for_save(retry_ctx: Optional[click.Context] = None):
"""
Prompt whether to save the config file. If no is answered, `False` is returned.
If `retry_ctx` is passed, the context's command will be reexecuted with the same arguments if the user chooses to retry.
False will still be returned, as the retry is expected to either save, perform another retry or abort.
"""
if click.confirm(f'Do you want to save your changes to {config.runtime["config_file"]}?', default=True):
return True
if retry_ctx:
if click.confirm('Retry? ("n" to quit without saving)', default=True):
retry_ctx.forward(retry_ctx.command)
return False
config = ConfigStateHolder(file_conf_base=CONFIG_DEFAULTS)
config_option = click.option(
'-C',
'--config',
'config_file',
help='Override path to config file',
)
@click.group(name='config')
def cmd_config():
"""Manage the configuration and -profiles"""
noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)
@cmd_config.command(name='init')
@noninteractive_flag
@noop_flag
@click.option(
'--sections',
'-s',
multiple=True,
type=click.Choice(CONFIG_SECTIONS),
default=CONFIG_SECTIONS,
show_choices=True,
)
@click.pass_context
def cmd_config_init(ctx, sections: list[str] = CONFIG_SECTIONS, non_interactive: bool = False, noop: bool = False):
"""Initialize the config file"""
if not non_interactive:
results: dict[str, dict] = {}
for section in sections:
if section not in CONFIG_SECTIONS:
raise Exception(f'Unknown section: {section}')
if section == 'profiles':
continue
results[section] = {}
for key, current in config.file[section].items():
text = f'{section}.{key}'
result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key]))
if changed:
results[section][key] = result
config.update(results)
if 'profiles' in sections:
current_profile = 'default' if 'current' not in config.file['profiles'] else config.file['profiles']['current']
new_current, _ = prompt_config('profile.current', default=current_profile, field_type=str)
profile, changed = prompt_profile(new_current, create=True)
config.update_profile(new_current, profile)
if not noop:
if not prompt_for_save(ctx):
return
if not noop:
config.write()
else:
logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')
@cmd_config.command(name='set')
@noninteractive_flag
@noop_flag
@click.argument('key_vals', nargs=-1)
@click.pass_context
def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop: bool = False):
"""
Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers,
like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively.
"""
config.enforce_config_loaded()
config_copy = deepcopy(config.file)
for pair in key_vals:
split_pair = pair.split('=')
if len(split_pair) == 2:
key: str = split_pair[0]
value: Any = split_pair[1]
value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
if value_type != list:
value = click.types.convert_type(value_type)(value)
else:
value = comma_str_to_list(value, default=[])
elif len(split_pair) == 1 and not non_interactive:
key = split_pair[0]
value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
current = config_dot_name_get(key, config.file)
value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False)
else:
raise Exception(f'Invalid key=value pair "{pair}"')
print('%s = %s' % (key, value))
config_dot_name_set(key, value, config_copy)
if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy:
raise Exception(f'Config "{key}" = "{value}" failed to evaluate')
if not noop:
if not non_interactive and not prompt_for_save(ctx):
return
config.update(config_copy)
config.write()
@cmd_config.command(name='get')
@click.argument('keys', nargs=-1)
def cmd_config_get(keys: list[str]):
"""Get config entries.
Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`"""
if len(keys) == 1:
print(config_dot_name_get(keys[0], config.file))
return
for key in keys:
print('%s = %s' % (key, config_dot_name_get(key, config.file)))
@cmd_config.group(name='profile')
def cmd_profile():
"""Manage config profiles"""
@cmd_profile.command(name='init')
@noninteractive_flag
@noop_flag
@click.argument('name', required=True)
@click.pass_context
def cmd_profile_init(ctx, name: str, non_interactive: bool = False, noop: bool = False):
"""Create or edit a profile"""
profile = deepcopy(PROFILE_EMPTY)
if name in config.file['profiles']:
profile |= config.file['profiles'][name]
if not non_interactive:
profile, _changed = prompt_profile(name, create=True)
config.update_profile(name, profile)
if not noop:
if not prompt_for_save(ctx):
return
config.write()
else:
logging.info(f'--noop passed, not writing to {config.runtime["config_file"]}!')


@@ -1,179 +0,0 @@
from typing_extensions import TypeAlias
from typing import TypedDict
FASTBOOT = 'fastboot'
FLASH_PARTS = {
'ROOTFS': 'rootfs',
'ABOOT': 'aboot',
'LK2ND': 'lk2nd',
'QHYPSTUB': 'qhypstub',
}
EMMC = 'emmc'
MICROSD = 'microsd'
LOCATIONS = [EMMC, MICROSD]
JUMPDRIVE = 'jumpdrive'
JUMPDRIVE_VERSION = '0.8'
BOOT_STRATEGIES: dict[str, str] = {
'oneplus-enchilada': FASTBOOT,
'oneplus-fajita': FASTBOOT,
'xiaomi-beryllium-ebbg': FASTBOOT,
'xiaomi-beryllium-tianma': FASTBOOT,
'bq-paella': FASTBOOT,
}
DEVICES: dict[str, list[str]] = {
'oneplus-enchilada': ['device-sdm845-oneplus-enchilada'],
'oneplus-fajita': ['device-sdm845-oneplus-fajita'],
'xiaomi-beryllium-ebbg': ['device-sdm845-xiaomi-beryllium-ebbg'],
'xiaomi-beryllium-tianma': ['device-sdm845-xiaomi-beryllium-tianma'],
'bq-paella': ['device-msm8916-bq-paella'],
}
BASE_PACKAGES: list[str] = [
'base',
'base-kupfer',
'nano',
'vim',
]
class Flavour(TypedDict, total=False):
packages: list[str]
post_cmds: list[str]
size: int
FLAVOURS: dict[str, Flavour] = {
'barebone': {
'packages': [],
},
'debug-shell': {
'packages': ['hook-debug-shell'],
},
'gnome': {
'packages': ['gnome', 'archlinux-appstream-data', 'gnome-software-packagekit-plugin'],
'post_cmds': ['systemctl enable gdm'],
'size': 8,
},
'phosh': {
'packages': [
'phosh',
'phosh-osk-stub', # temporary replacement for 'squeekboard',
'gnome-control-center',
'gnome-software',
'gnome-software-packagekit-plugin',
'archlinux-appstream-data',
'gnome-initial-setup',
'kgx',
'iio-sensor-proxy',
],
'post_cmds': ['systemctl enable phosh'],
'size': 5,
}
}
REPOSITORIES = [
'boot',
'cross',
'device',
'firmware',
'linux',
'main',
'phosh',
]
DEFAULT_PACKAGE_BRANCH = 'dev'
KUPFER_HTTPS = 'https://gitlab.com/kupfer/packages/prebuilts/-/raw/%branch%/$arch/$repo'
Arch: TypeAlias = str
ARCHES = [
'x86_64',
'aarch64',
]
DistroArch: TypeAlias = Arch
TargetArch: TypeAlias = Arch
BASE_DISTROS: dict[DistroArch, dict[str, dict[str, str]]] = {
'x86_64': {
'repos': {
'core': 'http://ftp.halifax.rwth-aachen.de/archlinux/$repo/os/$arch',
'extra': 'http://ftp.halifax.rwth-aachen.de/archlinux/$repo/os/$arch',
'community': 'http://ftp.halifax.rwth-aachen.de/archlinux/$repo/os/$arch',
},
},
'aarch64': {
'repos': {
'core': 'http://mirror.archlinuxarm.org/$arch/$repo',
'extra': 'http://mirror.archlinuxarm.org/$arch/$repo',
'community': 'http://mirror.archlinuxarm.org/$arch/$repo',
'alarm': 'http://mirror.archlinuxarm.org/$arch/$repo',
'aur': 'http://mirror.archlinuxarm.org/$arch/$repo',
},
},
}
COMPILE_ARCHES: dict[Arch, str] = {
'x86_64': 'amd64',
'aarch64': 'arm64',
}
GCC_HOSTSPECS: dict[DistroArch, dict[TargetArch, str]] = {
'x86_64': {
'x86_64': 'x86_64-pc-linux-gnu',
'aarch64': 'aarch64-linux-gnu',
},
'aarch64': {
'aarch64': 'aarch64-unknown-linux-gnu',
}
}
CFLAGS_GENERAL = ['-O2', '-pipe', '-fstack-protector-strong']
CFLAGS_ARCHES: dict[Arch, list[str]] = {
'x86_64': ['-march=x86-64', '-mtune=generic'],
'aarch64': [
'-march=armv8-a',
'-fexceptions',
'-Wp,-D_FORTIFY_SOURCE=2',
'-Wformat',
'-Werror=format-security',
'-fstack-clash-protection',
]
}
QEMU_BINFMT_PKGS = ['qemu-user-static-bin', 'binfmt-qemu-static']
CROSSDIRECT_PKGS = ['crossdirect'] + QEMU_BINFMT_PKGS
SSH_DEFAULT_HOST = '172.16.42.1'
SSH_DEFAULT_PORT = 22
SSH_COMMON_OPTIONS = [
'-o',
'GlobalKnownHostsFile=/dev/null',
'-o',
'UserKnownHostsFile=/dev/null',
'-o',
'StrictHostKeyChecking=no',
]
CHROOT_PATHS = {
'chroots': '/chroot',
'jumpdrive': '/var/cache/jumpdrive',
'pacman': '/var/cache/pacman',
'packages': '/prebuilts',
'pkgbuilds': '/pkgbuilds',
'images': '/images',
}
WRAPPER_TYPES = [
'none',
'docker',
]
MAKEPKG_CMD = [
'makepkg',
'--noconfirm',
'--ignorearch',
'--needed',
]


@@ -1,95 +0,0 @@
from typing import Optional, Mapping
from constants import Arch, ARCHES, BASE_DISTROS, REPOSITORIES, KUPFER_HTTPS, CHROOT_PATHS
from generator import generate_pacman_conf_body
from config import config
from .package import PackageInfo
from .repo import RepoInfo, Repo
class Distro:
repos: Mapping[str, Repo]
arch: str
def __init__(self, arch: Arch, repo_infos: dict[str, RepoInfo], scan=False):
assert (arch in ARCHES)
self.arch = arch
self.repos = dict[str, Repo]()
for repo_name, repo_info in repo_infos.items():
self.repos[repo_name] = Repo(
name=repo_name,
arch=arch,
url_template=repo_info.url_template,
options=repo_info.options,
scan=scan,
)
def get_packages(self) -> dict[str, PackageInfo]:
""" get packages from all repos, semantically overlaying them"""
results = dict[str, PackageInfo]()
for repo in list(self.repos.values())[::-1]:
assert repo.packages is not None
results.update(repo.packages)
return results
def repos_config_snippet(self, extra_repos: Mapping[str, RepoInfo] = {}) -> str:
extras = [Repo(name, url_template=info.url_template, arch=self.arch, options=info.options, scan=False) for name, info in extra_repos.items()]
return '\n\n'.join(repo.config_snippet() for repo in (extras + list(self.repos.values())))
def get_pacman_conf(self, extra_repos: Mapping[str, RepoInfo] = {}, check_space: bool = True):
body = generate_pacman_conf_body(self.arch, check_space=check_space)
return body + self.repos_config_snippet(extra_repos)
def scan(self, lazy=True):
for repo in self.repos.values():
if not (lazy and repo.scanned):
repo.scan()
def is_scanned(self):
for repo in self.repos.values():
if not repo.scanned:
return False
return True
def get_base_distro(arch: str) -> Distro:
repos = {name: RepoInfo(url_template=url) for name, url in BASE_DISTROS[arch]['repos'].items()}
return Distro(arch=arch, repo_infos=repos, scan=False)
def get_kupfer(arch: str, url_template: str, scan: bool = False) -> Distro:
repos = {name: RepoInfo(url_template=url_template, options={'SigLevel': 'Never'}) for name in REPOSITORIES}
return Distro(
arch=arch,
repo_infos=repos,
scan=scan,
)
_kupfer_https = dict[Arch, Distro]()
_kupfer_local = dict[Arch, Distro]()
_kupfer_local_chroots = dict[Arch, Distro]()
def get_kupfer_https(arch: Arch, scan: bool = False) -> Distro:
global _kupfer_https
if arch not in _kupfer_https or not _kupfer_https[arch]:
_kupfer_https[arch] = get_kupfer(arch, KUPFER_HTTPS.replace('%branch%', config.file['pacman']['repo_branch']), scan)
item = _kupfer_https[arch]
if scan and not item.is_scanned():
item.scan()
return item
def get_kupfer_local(arch: Optional[Arch] = None, in_chroot: bool = True, scan: bool = False) -> Distro:
global _kupfer_local, _kupfer_local_chroots
cache = _kupfer_local_chroots if in_chroot else _kupfer_local
arch = arch or config.runtime['arch']
if arch not in cache or not cache[arch]:
dir = CHROOT_PATHS['packages'] if in_chroot else config.get_path('packages')
cache[arch] = get_kupfer(arch, f"file://{dir}/$arch/$repo")
item = cache[arch]
if scan and not item.is_scanned():
item.scan()
return item


@@ -1,33 +0,0 @@
from typing import Optional
class PackageInfo:
name: str
version: str
filename: str
resolved_url: Optional[str]
def __init__(
self,
name: str,
version: str,
filename: str,
resolved_url: str = None,
):
self.name = name
self.version = version
self.filename = filename
self.resolved_url = resolved_url
def __repr__(self):
return f'{self.name}@{self.version}'
@staticmethod
def parse_desc(desc_str: str, resolved_url=None):
"""Parses a desc file, returning a PackageInfo"""
pruned_lines = ([line.strip() for line in desc_str.split('%') if line.strip()])
desc = {}
for key, value in zip(pruned_lines[0::2], pruned_lines[1::2]):
desc[key.strip()] = value.strip()
return PackageInfo(desc['NAME'], desc['VERSION'], desc['FILENAME'], resolved_url='/'.join([resolved_url, desc['FILENAME']]))


@@ -1,75 +0,0 @@
from copy import deepcopy
import logging
import os
import tarfile
import tempfile
import urllib.request
from .package import PackageInfo
def resolve_url(url_template, repo_name: str, arch: str):
result = url_template
for template, replacement in {'$repo': repo_name, '$arch': arch}.items():
result = result.replace(template, replacement)
return result
class RepoInfo:
options: dict[str, str] = {}
url_template: str
def __init__(self, url_template: str, options: dict[str, str] = {}):
self.url_template = url_template
self.options.update(options)
class Repo(RepoInfo):
name: str
resolved_url: str
arch: str
packages: dict[str, PackageInfo]
remote: bool
scanned: bool = False
def resolve_url(self) -> str:
return resolve_url(self.url_template, repo_name=self.name, arch=self.arch)
def scan(self):
self.resolved_url = self.resolve_url()
self.remote = not self.resolved_url.startswith('file://')
uri = f'{self.resolved_url}/{self.name}.db'
path = ''
if self.remote:
logging.info(f'Downloading repo file from {uri}')
with urllib.request.urlopen(uri) as request:
fd, path = tempfile.mkstemp()
with open(fd, 'wb') as writable:
writable.write(request.read())
else:
path = uri.split('file://')[1]
logging.debug(f'Parsing repo file at {path}')
with tarfile.open(path) as index:
for node in index.getmembers():
if os.path.basename(node.name) == 'desc':
logging.debug(f'Parsing desc file for {os.path.dirname(node.name)}')
pkg = PackageInfo.parse_desc(index.extractfile(node).read().decode(), self.resolved_url)
self.packages[pkg.name] = pkg
self.scanned = True
def __init__(self, name: str, url_template: str, arch: str, options={}, scan=False):
self.packages = {}
self.name = name
self.url_template = url_template
self.arch = arch
self.options = deepcopy(options)
if scan:
self.scan()
def config_snippet(self) -> str:
options = {'Server': self.url_template} | self.options
return ('[%s]\n' % self.name) + '\n'.join([f"{key} = {value}" for key, value in options.items()])
def get_RepoInfo(self):
return RepoInfo(url_template=self.url_template, options=self.options)

docker_version.txt

@@ -0,0 +1 @@
BUILD

docs/.gitignore

@@ -2,3 +2,7 @@
.doctrees
html
source/cli
source/code
checkouts
versions
archived


@@ -1,16 +1,72 @@
buildargs := -b dirhtml -aE source html
buildargs := -b dirhtml -aE source
.PHONY: cleanbuild clean
.PHONY: cleanbuild clean serve serve_versions versions versions_git versions_index
.NOTINTERMEDIATE:
.PRECIOUS: versions/index.html versions/%/index.html checkouts/%/docs/html/index.html archived/%.tar.gz
BRANCHES := main dev
TAGS := $(shell git tag)
FILTERED_TAGS := $(foreach tag,$(TAGS),$(shell if [[ -n "$$(git log --max-count=1 --oneline "$(tag)" -- .)" ]]; then echo "$(tag)"; fi))
VERSIONS := $(BRANCHES) $(FILTERED_TAGS)
cleanbuild:
@make clean
@make html
@$(MAKE) clean
@$(MAKE) html
clean:
rm -rf html source/cli
rm -rf html source/cli source/code .buildinfo .doctrees versions checkouts
html:
sphinx-build $(buildargs)
sphinx-build $(SPHINXARGS) $(buildargs) html
serve: html
(cd html && python -m http.server 9999)
cd html && python -m http.server 9999
checkouts/%/docs/html/index.html:
@mkdir -p checkouts
@# use backslashed multi-line cmd because otherwise variables will be lost
@branch="$$(echo "$(@D)" | sed 's|^checkouts/||g;s|/docs/html$$||g')" && \
ref="$$branch" && \
if ! git log --max-count=1 --oneline "$$branch" >/dev/null 2>/dev/null ; then \
commit="$$(git ls-remote origin refs/{tags,heads}/"$$branch" | cut -f 1)" ; \
[[ -n "$$commit" ]] && echo "found commit $$commit for $$branch" >&2 && \
ref="$$commit" && git branch -f "$$branch" "$$ref" ; \
fi && \
[[ -n "$$(git log --max-count=1 --oneline "$$ref" -- .)" ]] || \
(echo "ERROR: branch '$$branch' seems to have no docs/ dir, checked ref '$$ref'" >&2 && exit 1) && \
checkout="checkouts/$$branch" && \
ver="$$(echo "$$branch" | sed 's|^v\([0-9]\)|\1|g')" && \
set -x && \
([[ -e "$$checkout/.git" ]] || git clone .. "$$checkout" ) && \
(! [[ -e "$$checkout/docs/source/conf.py" ]] || echo "version = '$$ver'" >> "$$checkout/docs/source/conf.py") && \
$(MAKE) -C "$$checkout/docs" SPHINXARGS="-D version=$$ver"
archived/%.tar.gz: checkouts/%/docs/html/index.html
mkdir -p archived
tar -C "checkouts/$*/docs/html" -czf "$@" .
versions/%/index.html: archived/%.tar.gz
@mkdir -p "$(@D)"
@echo "working on version '$*'"
tar -xf "archived/$*.tar.gz" -C "$(@D)"
@# ensure index file exists and update its timestamp for Make's dependency detection
[[ -e "$(@)" ]] && touch "$(@)"
versions/versions.css: versjon/versions.css
@mkdir -p versions
cp versjon/versions.css versions/
versions_git:
@$(MAKE) $(patsubst %, versions/%/index.html, $(VERSIONS))
versions/index.html: $(sort $(wildcard versions/*/index.html))
rm -rf versions/stable
@cd versions && set -x && versjon --stable-version main --user_templates ../versjon
@# ensure the global index.html exists and is newer than each version's index.html
[[ -e "$(@)" ]] && touch "$(@)"
versions: versions_git versions/versions.css
@$(MAKE) versions/index.html
serve_versions: versions/index.html
cd versions && python -m http.server 9888


@@ -1,3 +1,5 @@
sphinx-click
myst-parser
# furo sphinx theme
furo
versjon<=2.3.0

docs/source/cli.md

@@ -0,0 +1,18 @@
# CLI Interface
```{eval-rst}
.. click:: kupferbootstrap.main:cli
:nested: none
:prog: kupferbootstrap
```
## Commands
% generated by cmd.rst
```{toctree}
:glob: true
cli/*
```


@@ -1,17 +0,0 @@
#############
CLI Interface
#############
.. click:: main:cli
:nested: none
:prog: kupferbootstrap
Commands
========
.. generated by cmd.rst
.. toctree::
:glob:
cli/*


@@ -1,21 +1,24 @@
:orphan:
:nosearch:
---
nosearch: true
orphan: true
---
only used to trigger builds of the submodule docs!
```{eval-rst}
.. currentmodule:: kupferbootstrap
.. autosummary::
:toctree: cli
:template: command.rst
:recursive:
boot
binfmt
cache
chroot
config
flash
forwarding
devices
flavours
image
net
packages
ssh
telnet
```

docs/source/code.md

@@ -0,0 +1,9 @@
# Code
Code documentation is available here
```{toctree}
:glob: true
code/kupferbootstrap
```

docs/source/codegen.rst

@@ -0,0 +1,8 @@
:nosearch:
:orphan:
.. autosummary::
:toctree: code
:recursive:
kupferbootstrap


@@ -1,11 +1,17 @@
import logging
import os
import sys
from sphinx.config import getenv
from kupferbootstrap.utils import git
sys.path.insert(0, os.path.abspath('../..'))
#sys.path.insert(0, os.path.abspath('../..'))
extensions = [
'sphinx_click',
'sphinx.ext.autosummary', # Create neat summary tables
"sphinx.ext.autodoc",
'sphinx.ext.autosummary',
"sphinx.ext.linkcode",
'myst_parser'
]
myst_all_links_external = True
templates_path = ['templates']
project = 'Kupfer👢strap'
html_title = 'Kupferbootstrap'
@@ -27,4 +33,45 @@ html_theme_options = {
"color-brand-content": "#eba38d",
"color-problematic": "#ff7564",
},
"source_repository": "https://gitlab.com/kupfer/kupferbootstrap",
"source_directory": "docs/source/",
}
autosummary_generate = True
autodoc_default_options = {
"members": True,
"undoc-members": True,
"show-inheritance": True,
"inherited-members": True,
}
autodoc_preserve_defaults = True
def get_version():
try:
res = git(
["rev-parse", "HEAD"],
dir=os.path.join(os.path.dirname(__file__), "../.."),
use_git_dir=True,
capture_output=True,
)
res.check_returncode()
ver = res.stdout.decode().strip()
logging.info(f"Detected git {ver=}")
return ver
except Exception as ex:
logging.warning("Couldn't get git branch:", exc_info=ex)
return "HEAD"
version = getenv("version") or get_version()
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
filename = info['module'].replace('.', '/')
return "%s/-/blob/%s/src/%s.py" % (html_theme_options["source_repository"], version, filename)


@@ -1,134 +0,0 @@
#############
Configuration
#############
Kupferbootstrap uses `toml <https://en.wikipedia.org/wiki/TOML>`_ for its configuration file.
The file can either be edited manually or managed via the :doc:`cli/config` subcommand.
You can quickly generate a default config by running :code:`kupferbootstrap config init -N`.
File Location
#############
The configuration is stored in ``~/.config/kupfer/kupferbootstrap.toml``, where ``~`` is your user's home folder.
Kupferbootstrap needs to create a number of folders, e.g. to download ``PKGBUILDs.git`` and store binary packages.
By default, all of those folders live inside ``~/.cache/kupfer/``.
See also the ``[paths]`` section in your config.
Sections
########
A config file is split into sections like so:
.. code-block:: toml
[pkgbuilds]
git_repo = "https://gitlab.com/kupfer/packages/pkgbuilds.git"
git_branch = "dev"
[pacman]
parallel_downloads = 3
Here, we have two sections: ``pkgbuilds`` and ``pacman``.
Flavours
########
Flavours are preset collections of software and functionality to enable,
i.e. desktop environments like `Gnome <https://en.wikipedia.org/wiki/GNOME>`_
and `Phosh <https://en.wikipedia.org/wiki/Phosh>`_.
Profiles
########
The last section and currently the only one with subsections is the ``profiles`` section.
A profile is the configuration of a specific device image. It specifies (amongst others):
* the device model
* the flavour (desktop environment)
* the host- and user name
* extra packages to install
Using a profile's ``parent`` key,
you can inherit settings from another profile.
This allows you to easily keep a number of slight variations of the same target profile around
without the need to constantly modify your Kupferbootstrap configuration file.
You can easily create new profiles with
`kupferbootstrap config profile init <../cli/config/#kupferbootstrap-config-profile-init>`_.
Here's an example:
.. code:: toml
[profiles]
current = "graphical"
[profiles.default]
parent = ""
device = "oneplus-enchilada"
flavour = "barebone"
pkgs_include = [ "wget", "rsync", "nano", "tmux", "zsh", "pv", ]
pkgs_exclude = []
hostname = "kupferphone"
username = "prawn"
size_extra_mb = 800
[profiles.graphical]
parent = "default"
flavour = "phosh"
pkgs_include = [ "firefox", "tilix", "gnome-tweaks" ]
size_extra_mb = "+3000"
[profiles.hades]
parent = "graphical"
flavour = "phosh"
hostname = "hades"
[profiles.recovery]
parent = "default"
flavour = "debug-shell"
[profiles.beryllium]
parent = "graphical"
device = "xiaomi-beryllium-ebbg"
flavour = "gnome"
hostname = "pocof1"
The ``current`` key in the ``profiles`` section controls which profile gets used by Kupferbootstrap by default.
The first subsection (``profiles.default``) describes the `default` profile
which gets created by `config init <../cli/config/#kupferbootstrap-config-init>`_.
Next, we have a `graphical` profile that defines a couple of graphical programs for all but the `recovery` profile,
since that doesn't have a GUI.
``size_extra_mb``
-----------------
Note how ``size_extra_mb`` can either be a plain integer (``800``) or a string,
optionally leading with a plus sign (``+3000``),
which instructs Kupferbootstrap to add the value to the parent profile's ``size_extra_mb``.
``pkgs_include`` / ``pkgs_exclude``
-----------------------------------
Like ``size_extra_mb``, ``pkgs_include`` will be merged with the parent profile's ``pkgs_include``.
To exclude unwanted packages from being inherited from a parent profile, use ``pkgs_exclude`` in the child profile.
.. hint::
``pkgs_exclude`` has no influence on Pacman's dependency resolution.
It only blocks packages during image build that would usually be explicitly installed
due to being listed in a parent profile or the selected flavour.

docs/source/genindex.rst

@@ -0,0 +1,2 @@
Module Index
============

docs/source/index.md

@@ -0,0 +1,13 @@
# Kupferbootstrap Documentation
This is the documentation for [Kupferbootstrap](https://gitlab.com/kupfer/kupferbootstrap),
a tool to build and flash packages and images for the [Kupfer](https://gitlab.com/kupfer/) mobile Linux distro.
## Documentation pages
```{toctree}
usage/index
cli
code
genindex
```


@@ -1,16 +0,0 @@
#############################
Kupferbootstrap Documentation
#############################
This is the documentation for `Kupferbootstrap <https://gitlab.com/kupfer/kupferbootstrap>`_,
a tool to build and flash packages and images for the `Kupfer <https://gitlab.com/kupfer/>`_ mobile Linux distro.
Documentation pages
===================
.. toctree::
install
config
cli


@@ -1,35 +0,0 @@
############
Installation
############
#.
Install Python 3, Docker, and git.
On Arch: ``pacman -S python docker git --needed --noconfirm``
.. Hint::
After installing Docker you will have to add your user to the ``docker`` group:
``sudo usermod -aG docker "$(whoami)"``
Then restart your desktop session for the new group to take effect.
#. Pick which Kupferbootstrap branch to clone: usually either ``main`` or ``dev``
#. Clone the repository: ``git clone -b INSERT_BRANCHNAME_HERE https://gitlab.com/kupfer/kupferbootstrap``
#. Change into the folder: ``cd kupferbootstrap``
#.
Install python dependencies: ``pip3 install -r requirements.txt``
.. Note::
Most of our python dependencies are available as distro packages on most distros;
sadly, the selection on Arch is incomplete.
See ``requirements.txt`` for the list of required python packages.
#. Symlink ``kupferbootstrap`` into your ``$PATH``: ``sudo ln -s "$(pwd)/bin/kupferbootstrap" /usr/local/bin/``
#. You should now be able to run ``kupferbootstrap --help``!


@@ -0,0 +1,36 @@
{% set reduced_name = fullname.split(".", 1)[-1] if fullname.startswith("kupferbootstrap.") else fullname %}
{{ fullname | escape | underline }}
.. rubric:: Description
.. automodule:: {{ fullname }}
:members:
:undoc-members:
.. currentmodule:: {{ fullname }}
{% if classes %}
.. rubric:: Classes
.. autosummary::
:toctree: .
{% for class in classes %}
{{ class }}
{% endfor %}
{% endif %}
{% if functions %}
.. rubric:: Functions
.. autosummary::
:toctree: .
{% for function in functions %}
{{ function }}
{% endfor %}
{% endif %}


@@ -1,5 +1,9 @@
.. title: {{fullname}}
{% set reduced_name = fullname.split(".", 1)[-1] if fullname.startswith("kupferbootstrap.") else fullname %}
.. title: {{reduced_name}}
.. click:: {% if fullname == 'main' %}main:cli{% else %}{{fullname}}:cmd_{{fullname}}{% endif %}
:prog: kupferbootstrap {{fullname}}
.. currentmodule:: {{ fullname }}
.. click:: {% if fullname == 'main' %}kupferbootstrap.main:cli{% else %}{{fullname}}.cli:cmd_{{reduced_name}}{% endif %}
:prog: kupferbootstrap {{reduced_name}}
:nested: full

docs/source/usage/config.md

@@ -0,0 +1,125 @@
# Configuration
Kupferbootstrap uses [toml](https://en.wikipedia.org/wiki/TOML) for its configuration file.
The file can either be edited manually or managed via the [`kupferbootstrap config`](../../cli/config) subcommand.
```{hint}
You can quickly generate a default config by running {code}`kupferbootstrap config init -N`.
For an interactive dialogue, omit the `-N`.
```
## File Location
The configuration is stored in `~/.config/kupfer/kupferbootstrap.toml`, where `~` is your user's home folder.
Kupferbootstrap needs to create a number of folders, e.g. to download `PKGBUILDs.git` and store binary packages.
By default, all of those folders live inside `~/.cache/kupfer/`.
See also the `[paths]` section in your config.
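For reference, the default `[paths]` layout (taken from Kupferbootstrap's config defaults; the exact `cache_dir` is resolved via `appdirs` at runtime) looks roughly like this, with `%cache_dir%` acting as a placeholder that expands to the `cache_dir` value:
```toml
[paths]
cache_dir = "/home/you/.cache/kupfer"  # resolved via appdirs; shown here as an example
chroots = "%cache_dir%/chroots"
pacman = "%cache_dir%/pacman"
packages = "%cache_dir%/packages"
pkgbuilds = "%cache_dir%/pkgbuilds"
jumpdrive = "%cache_dir%/jumpdrive"
images = "%cache_dir%/images"
```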
## Sections
A config file is split into sections like so:
```toml
[pkgbuilds]
git_repo = "https://gitlab.com/kupfer/packages/pkgbuilds.git"
git_branch = "dev"
[pacman]
parallel_downloads = 3
```
Here, we have two sections: `pkgbuilds` and `pacman`.
## Flavours
Flavours are preset collections of software and functionality to enable,
i.e. desktop environments like [Gnome](https://en.wikipedia.org/wiki/GNOME)
and [Phosh](https://en.wikipedia.org/wiki/Phosh).
## Profiles
The last section and currently the only one with subsections is the `profiles` section.
A profile is the configuration of a specific device image. It specifies (amongst others):
- the device model
- the flavour (desktop environment)
- the host- and user name
- extra packages to install
Using a profile's `parent` key,
you can inherit settings from another profile.
This allows you to easily keep a number of slight variations of the same target profile around
without the need to constantly modify your Kupferbootstrap configuration file.
You can easily create new profiles with
[kupferbootstrap config profile init](../../cli/config/#kupferbootstrap-config-profile-init).
Here's an example:
```toml
[profiles]
current = "graphical"
[profiles.default]
parent = ""
device = "sdm845-oneplus-enchilada"
flavour = "barebone"
pkgs_include = [ "wget", "rsync", "nano", "tmux", "zsh", "pv", ]
pkgs_exclude = []
hostname = "kupferphone"
username = "prawn"
size_extra_mb = 800
[profiles.graphical]
parent = "default"
flavour = "phosh"
pkgs_include = [ "firefox", "tilix", "gnome-tweaks" ]
size_extra_mb = "+3000"
[profiles.hades]
parent = "graphical"
flavour = "phosh"
hostname = "hades"
[profiles.recovery]
parent = "default"
flavour = "debug-shell"
[profiles.beryllium]
parent = "graphical"
device = "sdm845-xiaomi-beryllium-ebbg"
flavour = "gnome"
hostname = "pocof1"
```
The `current` key in the `profiles` section controls which profile gets used by Kupferbootstrap by default.
The first subsection (`profiles.default`) describes the `default` profile
which gets created by [`kupferbootstrap config init`](../../cli/config/#kupferbootstrap-config-init).
Next, we have a `graphical` profile that defines a couple of graphical programs for all but the `recovery` profile,
since that doesn't have a GUI.
### `size_extra_mb`
Note how `size_extra_mb` can either be a plain integer (`800`) or a string,
optionally leading with a plus sign (`+3000`),
which instructs Kupferbootstrap to add the value to the parent profile's `size_extra_mb`.
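In the example above, `graphical` therefore ends up with `800 + 3000 = 3800` MB of extra space: its `"+3000"` is added to the `800` it inherits from `default`.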
### `pkgs_include` / `pkgs_exclude`
Like `size_extra_mb`, `pkgs_include` will be merged with the parent profile's `pkgs_include`.
To exclude unwanted packages from being inherited from a parent profile, use `pkgs_exclude` in the child profile.
```{hint}
`pkgs_exclude` has no influence on Pacman's dependency resolution.
It only blocks packages during image build that would usually be explicitly installed
due to being listed in a parent profile or the selected flavour.
```
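For illustration (the profile name `minimal` is made up), a child profile can drop a package that would otherwise be inherited from its parent's `pkgs_include`:
```toml
[profiles.minimal]
parent = "graphical"
# firefox is inherited from graphical's pkgs_include; exclude it again here
pkgs_exclude = [ "firefox" ]
```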

docs/source/usage/faq.md

@@ -0,0 +1,39 @@
# FAQ
```{contents} Table of Contents
:class: this-will-duplicate-information-and-it-is-still-useful-here
:depth: 3
```
## Which devices are currently supported?
Currently very few!
See [the `devices` repo](https://gitlab.com/kupfer/packages/pkgbuilds/-/tree/dev/device). We use the same codenames as [postmarketOS](https://wiki.postmarketos.org/wiki/Devices), although we prefix them with the SoC name.
## How to port a new device or package?
See [Porting](../porting)
## How to build a specific package
See also: The full [`kupferbootstrap packages build` docs](../../cli/packages#kupferbootstrap-packages-build)
### Example
For rebuilding `kupfer-config` and `crossdirect`, defaulting to your device's architecture:
```sh
kupferbootstrap packages build [--force] [--arch $target_arch] kupfer-config crossdirect
```
### By package path
You can also use a path snippet (`$repo/$pkgbase`) to the PKGBUILD folder as seen inside your pkgbuilds.git:
```sh
kupferbootstrap packages build [--force] main/kupfer-config cross/crossdirect
```


@@ -0,0 +1,9 @@
# Usage
```{toctree}
quickstart
faq
install
config
porting
```


@@ -0,0 +1,32 @@
# Installation
1. Install Python 3, Docker, and git.
On Arch: `pacman -S python docker git --needed --noconfirm`
```{Hint}
After installing Docker you will have to add your user to the `docker` group:
`sudo usermod -aG docker "$(whoami)"`
Then restart your desktop session for the new group to take effect.
```
2. Pick which Kupferbootstrap branch to clone: usually either `main` or `dev`
3. Clone the repository: `git clone -b INSERT_BRANCHNAME_HERE https://gitlab.com/kupfer/kupferbootstrap`
4. Change into the folder: `cd kupferbootstrap`
5. Install python dependencies: `pip3 install -r requirements.txt`
```{Note}
Most of our python dependencies are available as distro packages on most distros;
sadly, the selection on Arch is incomplete.
See `requirements.txt` for the list of required python packages.
```
6. Symlink `kupferbootstrap` into your `$PATH`: `sudo ln -s "$(pwd)/bin/kupferbootstrap" /usr/local/bin/`
7. You should now be able to run `kupferbootstrap --help`!


@@ -0,0 +1,94 @@
# Porting
## Porting devices
### Homework
Before you can get started porting a device, you'll need to do some research:
1. Familiarize yourself with git basics.
1. Familiarize yourself with Arch Linux packaging, i.e. `PKGBUILD`s and `makepkg`
1. Familiarize yourself with the postmarketOS port of the device.
```{warning}
If there is no postmarketOS port yet, you'll probably need to get deep into kernel development.
In that case, we suggest [starting with a port to pmOS](https://wiki.postmarketos.org/wiki/Porting_to_a_new_device) first, especially if you're not already familiar with the process.
```
### Porting
1. Navigate to your pkgbuilds checkout
1. Follow the [general package porting guidelines](#porting-packages) to create a device-, kernel- and probably also a firmware-package for the device and SoC. This usually means porting the postmarketOS APKBUILDs to our PKGBUILD scheme.
You can get inspiration by comparing existing Kupfer ports (e.g. one of the SDM845 devices) to the [postmarketOS packages](https://gitlab.com/postmarketOS/pmaports/-/tree/master/device) for that device.
Usually you should start out by copying and then customizing the Kupfer packages for a device that's as similar to yours as possible, i.e. uses the same or a related SoC, if something like that is already available in Kupfer.
```{hint} Package Repos:
Device packages belong in `device/`, kernels in `linux/`, and firmware in `firmware/`; see the example layout below.
```
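For instance, the existing OnePlus 6 port lives at `device/device-sdm845-oneplus-enchilada`; a full SDM845 port spreads across the repos roughly like this (the kernel and firmware folder names here are illustrative, not authoritative):
```
device/device-sdm845-oneplus-enchilada/   # device package
linux/linux-sdm845/                       # kernel package (illustrative name)
firmware/firmware-oneplus-sdm845/         # firmware package (illustrative name)
```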
1. When submitting your MR, please include some information:
- what you have found to be working, broken, and not tested (and why)
- any necessary instructions for testing
- whether you'd be willing to maintain the device long-term (test kernel upgrades, submit device package updates, etc.)
### Gotchas
Please be aware of these gotchas:
- As of now, Kupfer only really supports platforms using Android's `aboot` bootloader, i.e. ex-Android phones. In order to support other boot modes (e.g. U-Boot on the Librem 5 and Pine devices), we'll need to port and switch to postmarketOS's [boot-deploy](https://gitlab.com/postmarketOS/boot-deploy) first and add support for EFI setups to Kupferbootstrap.
## Porting packages
### Homework
Before you can get started, you'll need to do some research:
1. Familiarize yourself with git basics.
1. Familiarize yourself with Arch Linux packaging, i.e. `PKGBUILD`s and `makepkg`
### Development
```{warning}
Throughout the process, use git to version your changes.
- Don't put off using git or committing until you're "done" or "have got something working"; you'll regret it.
- Don't worry about a "clean" git history while you're developing; we can squash it up later.
- \[Force-]Push your changes regularly, just like committing. Don't wait for perfection.
```
1. Create a new git branch for your package locally.
```{hint}
It might be a good idea to get into the habit of prefixing branch names with \[a part of] your username and a slash, like so:
`myNickname/myFeatureName`
This makes it easier to work in the same remote repo with multiple people.
```
1. Choose a sensible package repo for your new packages and create a new folder for each `pkgbase` inside the repo folder.
```{note}
The pkgbuilds git repo contains multiple package repositories, represented by folders at the top level (`main`, `cross`, `phosh`, etc.).
```
1. Navigate into the folder of the new package and create a new `PKGBUILD`; fill it with life!
1. **`_mode`**: Add the build mode at the top of the PKGBUILD.
```{hint}
If you're unsure what to pick, go with `_mode=host`. It'll use `crossdirect` to get speeds close to proper cross-compiling.
```
This determines whether the package is built in a foreign-arch chroot executed with qemu-user (`_mode=host`) or properly cross-compiled from a host-architecture chroot (`_mode=cross`). The package's build tooling has to specifically support the latter, so it's mostly useful for kernels and packages without compiled code.
1. **`_nodeps`**: (Optional) If your package doesn't require its listed dependencies to build
(usually because you're packaging a meta-package or only configs or scripts),
you can add `_nodeps=true` as the next line after the `_mode=` line to speed up packaging;
`makedepends` are still installed regardless. See the sketch below.
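Putting `_mode` and `_nodeps` together, the top of a PKGBUILD might look like this minimal sketch (the package name and metadata are placeholders):
```sh
# _mode: "host" builds in a foreign-arch chroot via qemu-user (sped up by crossdirect),
#        "cross" cross-compiles from a host-architecture chroot
_mode=host
# _nodeps: optional; skip installing depends during the build
# (makedepends are still installed regardless)
_nodeps=true

pkgname=kupfer-example-config  # placeholder
pkgver=0.1
pkgrel=1
pkgdesc="Example config-only package"
arch=(any)
```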
1. Test building it with `kupferbootstrap packages build $pkgbase`
1. For any files and git repos downloaded by your PKGBUILD,
add them to a new `.gitignore` file in the same directory as your `PKGBUILD`, as shown in the sketch below.
```{hint}
Don't forget to `git add` the new `.gitignore` file!
```
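As an illustration, for a PKGBUILD that downloads a release tarball and clones a git repo, the `.gitignore` might contain something like this (the source names are assumptions; `src/` and `pkg/` are makepkg's working directories):
```
# makepkg working directories
src/
pkg/
# downloaded/cloned sources (assumed names)
example-app-0.1.tar.gz
example-app/
```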
1. Run `kupferbootstrap packages check` to make sure the formatting for your PKGBUILDs is okay.
```{warning}
This is **not** optional. MRs with failing CI will **not** be merged.
```
### Pushing
1. Fork the Kupfer pkgbuilds repo on Gitlab using the Fork button
1. Add your fork's **SSH** URI to your local git repo as a **new remote**: `git remote add fork git@gitlab...`
1. Push it: `git push -u fork $branchname`
### Submitting the MR
When you're ready, open a Merge Request on the Kupfer pkgbuilds repo.
```{hint}
Prefix the MR title with `Draft: ` to indicate a Work In Progress state.
```


@@ -0,0 +1,9 @@
# Quickstart
1. [Install](../install) Kupferbootstrap
1. [Configure](../config) it: `kupferbootstrap config init`
1. [Update your PKGBUILDs + SRCINFO cache](../../cli/packages#kupferbootstrap-packages-update): `kupferbootstrap packages update`
1. [Build an image](../../cli/image#kupferbootstrap-image-build): `kupferbootstrap image build`
1. [Flash the image](../../cli/image#kupferbootstrap-image-flash): `kupferbootstrap image flash abootimg && kupferbootstrap image flash full userdata`
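Put together, a first run with the defaults might look like this sketch:
```sh
kupferbootstrap config init                 # pick device, flavour, etc.
kupferbootstrap packages update             # fetch PKGBUILDs and the SRCINFO cache
kupferbootstrap image build                 # build the device image
kupferbootstrap image flash abootimg        # flash the boot image
kupferbootstrap image flash full userdata   # flash the full image to the userdata location
```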
See also: [Frequently Asked Questions](../faq)

docs/versjon/footer.html Normal file

@@ -0,0 +1,58 @@
{# FORMAT_VERSION #}
{% macro format_version(version) %}
{% if page in version.html_files %}
{% set version_path = page_root + docs_path[version.name] + "/" + page %}
{% else %}
{% set version_path = page_root + docs_path[version.name] %}
{% endif %}
{% if current == version.name %}
<strong>
<dd><a href="{{ version_path }}">{{ version.name }}</a></dd>
</strong>
{% else %}
<dd><a href="{{ version_path }}">{{ version.name }}</a></dd>
{% endif %}
{% endmacro %}
<div id="versjon-overlay">
<button type="button" class="versjon">
<svg xmlns="http://www.w3.org/2000/svg" id="branch-icon" class="ionicon" viewBox="0 0 512 512">
<!-- Taken from Ionic, MIT licensed. Copyright (c) 2015-present Ionic (http://ionic.io/) -->
<title>Git Branch</title><circle cx="160" cy="96" r="48" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/><circle cx="160" cy="416" r="48" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/><path fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32" d="M160 368V144"/><circle cx="352" cy="160" r="48" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/><path d="M352 208c0 128-192 48-192 160" fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="32"/>
</svg>
Version: {{current}}
</button>
<div class="versjon-content">
<div class="versjon-content-inner">
<dl>
<dl>
<dt>Branches</dt>
{% for version in other %}
{{ format_version(version) | indent(16) }}
{% endfor %}
</dl>
<dt>Versions</dt>
{% for version in semver %}
{{ format_version(version) | indent(16) }}
{% endfor %}
</dl>
</div>
</div>
</div>
<script>
var coll = document.getElementsByClassName("versjon");
var i;
for (i = 0; i < coll.length; i++) {
coll[i].addEventListener("click", function () {
this.classList.toggle("active");
var content = this.nextElementSibling;
if (content.style.maxHeight) {
content.style.maxHeight = null;
} else {
content.style.maxHeight = content.scrollHeight + "px";
}
});
}
</script>

docs/versjon/head.html Normal file

@@ -0,0 +1 @@
<link href="{{ page_root }}versions.css" rel="stylesheet" type="text/css">

docs/versjon/header.html Normal file

@@ -0,0 +1,11 @@
{% if stable and (stable.name|default("")) != current %}
{% if page in stable.html_files %}
{% set stable_path = page_root + docs_path[stable.name] + "/" + page %}
{% else %}
{% set stable_path = page_root + docs_path[stable.name] %}
{% endif %}
<p class="versjon-{% if is_semver %}old{% else %}dev{% endif %}-warning">
<strong>Warning:</strong> These docs are for version <b>{{current}}</b>. The docs for the latest stable version are at
<b> <a href="{{ stable_path }}">{{ stable.name }}</a> </b>.
</p>
{% endif %}

docs/versjon/versions.css Normal file

@@ -0,0 +1,99 @@
.versjon {
cursor: pointer;
padding: 10px;
width: 100%;
border: none;
text-align: left;
outline: none;
font-size: 15px;
background: var(--color-code-background);
color: var(--color-code-foreground);
transition: background-color 0.1s linear;
}
.versjon:hover {
background-color: var(--color-highlighted-background);
}
.versjon:after {
content: '\002B';
font-weight: bold;
float: right;
margin-left: 5px;
}
.versjon:active:after {
content: "\2212";
}
.versjon-content {
max-height: 0;
overflow: hidden;
transition: max-height 0.2s ease-out;
}
.versjon-content-inner {
padding: 10px 18px
}
#versjon-overlay {
position: fixed;
z-index: 100;
bottom: 0px;
right: 0px;
width: 250px;
background: var(--color-code-background);
max-height: 100%;
overflow: scroll;
}
p.versjon-old-warning {
margin: 10px 0;
padding: 5px 10px;
border-radius: 4px;
letter-spacing: 1px;
color: #fff;
text-shadow: 0 0 2px #000;
text-align: center;
background: #d40 repeating-linear-gradient(135deg,
transparent,
transparent 56px,
rgba(255, 255, 255, 0.2) 56px,
rgba(255, 255, 255, 0.2) 112px);
}
p.versjon-old-warning a {
color: #fff;
border-color: #fff;
}
p.versjon-dev-warning {
margin: 10px 0;
padding: 5px 10px;
border-radius: 4px;
letter-spacing: 1px;
color: #fff;
text-shadow: 0 0 2px #000;
text-align: center;
background: #E67300 repeating-linear-gradient(135deg,
transparent,
transparent 56px,
rgba(255, 255, 255, 0.2) 56px,
rgba(255, 255, 255, 0.2) 112px);
}
p.versjon-dev-warning a {
color: #fff;
border-color: #fff;
}
#branch-icon {
width: 1em;
height: 1em;
background-size: contain;
background-repeat: no-repeat;
}


@@ -1,39 +0,0 @@
import logging
import subprocess
def fastboot_erase_dtbo():
logging.info("Fastboot: Erasing DTBO")
subprocess.run(
[
'fastboot',
'erase',
'dtbo',
],
capture_output=True,
)
def fastboot_flash(partition, file):
logging.info(f"Fastboot: Flashing {file} to {partition}")
result = subprocess.run([
'fastboot',
'flash',
partition,
file,
])
if result.returncode != 0:
logging.info(f'Failed to flash {file}')
exit(1)
def fastboot_boot(file):
logging.info(f"Fastboot: booting {file}")
result = subprocess.run([
'fastboot',
'boot',
file,
])
if result.returncode != 0:
logging.fatal(f'Failed to boot {file} using fastboot')
exit(1)


@@ -1,92 +0,0 @@
import atexit
import shutil
import os
import subprocess
import click
import tempfile
from constants import FLASH_PARTS, LOCATIONS
from fastboot import fastboot_flash
from image import dd_image, partprobe, shrink_fs, losetup_rootfs_image, dump_aboot, dump_lk2nd, dump_qhypstub, get_device_and_flavour, get_image_name, get_image_path
from wrapper import enforce_wrap
ABOOT = FLASH_PARTS['ABOOT']
LK2ND = FLASH_PARTS['LK2ND']
QHYPSTUB = FLASH_PARTS['QHYPSTUB']
ROOTFS = FLASH_PARTS['ROOTFS']
@click.command(name='flash')
@click.argument('what', type=click.Choice(list(FLASH_PARTS.values())))
@click.argument('location', type=str, required=False)
def cmd_flash(what: str, location: str):
"""Flash a partition onto a device. `location` takes either a path to a block device or one of emmc, sdcard"""
enforce_wrap()
device, flavour = get_device_and_flavour()
device_image_name = get_image_name(device, flavour)
device_image_path = get_image_path(device, flavour)
# TODO: PARSE DEVICE SECTOR SIZE
sector_size = 4096
if what not in FLASH_PARTS.values():
raise Exception(f'Unknown what "{what}", must be one of {", ".join(FLASH_PARTS.values())}')
if what == ROOTFS:
if location is None:
raise Exception(f'You need to specify a location to flash {what} to')
path = ''
if location.startswith("/dev/"):
path = location
else:
if location not in LOCATIONS:
raise Exception(f'Invalid location {location}. Choose one of {", ".join(LOCATIONS)}')
dir = '/dev/disk/by-id'
for file in os.listdir(dir):
sanitized_file = file.replace('-', '').replace('_', '').lower()
if f'jumpdrive{location.split("-")[0]}' in sanitized_file:
path = os.path.realpath(os.path.join(dir, file))
partprobe(path)
result = subprocess.run(['lsblk', path, '-o', 'SIZE'], capture_output=True)
if result.returncode != 0:
raise Exception(f'Failed to lsblk {path}')
if result.stdout == b'SIZE\n 0B\n':
raise Exception(
f'Disk {path} has a size of 0B. That probably means it is not available (e.g. no microSD inserted or no microSD card slot installed in the device) or corrupt or defect'
)
if path == '':
raise Exception('Unable to discover Jumpdrive')
minimal_image_dir = tempfile.gettempdir()
minimal_image_path = os.path.join(minimal_image_dir, f'minimal-{device_image_name}')
def clean_dir():
shutil.rmtree(minimal_image_dir)
atexit.register(clean_dir)
shutil.copyfile(device_image_path, minimal_image_path)
loop_device = losetup_rootfs_image(minimal_image_path, sector_size)
partprobe(loop_device)
shrink_fs(loop_device, minimal_image_path, sector_size)
result = dd_image(input=minimal_image_path, output=path)
if result.returncode != 0:
raise Exception(f'Failed to flash {minimal_image_path} to {path}')
else:
loop_device = losetup_rootfs_image(device_image_path, sector_size)
if what == ABOOT:
path = dump_aboot(f'{loop_device}p1')
fastboot_flash('boot', path)
elif what == LK2ND:
path = dump_lk2nd(f'{loop_device}p1')
fastboot_flash('lk2nd', path)
elif what == QHYPSTUB:
path = dump_qhypstub(f'{loop_device}p1')
fastboot_flash('qhypstub', path)
else:
raise Exception(f'Unknown what "{what}", this must be a bug in kupferbootstrap!')


@@ -4,16 +4,22 @@ yapf_args=('--recursive' '--parallel')
autoflake_args=('--recursive' '--remove-unused-variables' '--remove-all-unused-imports' '--expand-star-imports' '--remove-duplicate-keys')
format() {
yapf "${yapf_args[@]}" .
autoflake "${autoflake_args[@]}" .
files=("$@")
if [[ -z "${files[*]}" ]]; then
files=(*.py "src")
fi
yapf "${yapf_args[@]}" "${files[@]}"
autoflake "${autoflake_args[@]}" "${files[@]}"
}
if [[ "$1" == "--check" ]]; then
yapf_args+=('--diff')
[[ "$(format | tee /dev/stderr | wc -c)" == "0" ]]
shift
[[ "$(format "$@" | tee /dev/stderr | wc -c)" == "0" ]]
else
yapf_args+=('--in-place')
autoflake_args+=('--in-place')
format
format "$@"
fi


@@ -1,49 +0,0 @@
import click
import subprocess
from logger import logging
from ssh import run_ssh_command
from wrapper import check_programs_wrap
@click.command(name='forwarding')
def cmd_forwarding():
"""Enable network forwarding for a usb-attached device"""
check_programs_wrap(['syctl', 'iptables'])
result = subprocess.run([
'sysctl',
'net.ipv4.ip_forward=1',
])
if result.returncode != 0:
logging.fatal(f'Failed to enable ipv4 forward via sysctl')
exit(1)
result = subprocess.run([
'iptables',
'-P',
'FORWARD',
'ACCEPT',
])
if result.returncode != 0:
logging.fatal(f'Failed set iptables rule')
exit(1)
result = subprocess.run([
'iptables',
'-A',
'POSTROUTING',
'-t',
'nat',
'-j',
'MASQUERADE',
'-s',
'172.16.42.0/24',
])
if result.returncode != 0:
logging.fatal(f'Failed set iptables rule')
exit(1)
result = run_ssh_command(cmd=['sudo -S route add default gw 172.16.42.2'])
if result.returncode != 0:
logging.fatal(f'Failed to add gateway over ssh')
exit(1)

image.py

@@ -1,485 +0,0 @@
import atexit
import json
import os
import re
import subprocess
import click
import logging
from signal import pause
from subprocess import run, CompletedProcess
from typing import Optional
from chroot.device import DeviceChroot, get_device_chroot
from constants import Arch, BASE_PACKAGES, DEVICES, FLAVOURS
from config import config, Profile
from distro.distro import get_base_distro, get_kupfer_https
from packages import build_enable_qemu_binfmt, discover_packages, build_packages
from ssh import copy_ssh_keys
from wrapper import enforce_wrap
# image files need to be slightly smaller than partitions to fit
IMG_FILE_ROOT_DEFAULT_SIZE = "1800M"
IMG_FILE_BOOT_DEFAULT_SIZE = "90M"
def dd_image(input: str, output: str, blocksize='1M') -> CompletedProcess:
cmd = [
'dd',
f'if={input}',
f'of={output}',
f'bs={blocksize}',
'iflag=direct',
'oflag=direct',
'status=progress',
'conv=sync,noerror',
]
logging.debug(f'running dd cmd: {cmd}')
return subprocess.run(cmd)
def partprobe(device: str):
return subprocess.run(['partprobe', device])
def shrink_fs(loop_device: str, file: str, sector_size: int):
# 8: 512 bytes sectors
# 1: 4096 bytes sectors
sectors_blocks_factor = 4096 // sector_size
partprobe(loop_device)
logging.debug(f"Checking filesystem at {loop_device}p2")
result = subprocess.run(['e2fsck', '-fy', f'{loop_device}p2'])
if result.returncode > 2:
# https://man7.org/linux/man-pages/man8/e2fsck.8.html#EXIT_CODE
raise Exception(f'Failed to e2fsck {loop_device}p2 with exit code {result.returncode}')
logging.debug(f'Shrinking filesystem at {loop_device}p2')
result = subprocess.run(['resize2fs', '-M', f'{loop_device}p2'], capture_output=True)
if result.returncode != 0:
print(result.stdout)
print(result.stderr)
raise Exception(f'Failed to resize2fs {loop_device}p2')
logging.debug(f'Finding end block of shrunken filesystem on {loop_device}p2')
blocks = int(re.search('is now [0-9]+', result.stdout.decode('utf-8')).group(0).split(' ')[2]) # type: ignore
sectors = blocks * sectors_blocks_factor #+ 157812 - 25600
logging.debug(f'Shrinking partition at {loop_device}p2 to {sectors} sectors')
child_proccess = subprocess.Popen(
['fdisk', '-b', str(sector_size), loop_device],
stdin=subprocess.PIPE,
)
child_proccess.stdin.write('\n'.join([ # type: ignore
'd',
'2',
'n',
'p',
'2',
'',
f'+{sectors}',
'w',
'q',
]).encode('utf-8'))
child_proccess.communicate()
returncode = child_proccess.wait()
if returncode == 1:
# For some reason re-reading the partition table fails, but that is not a problem
subprocess.run(['partprobe'])
if returncode > 1:
raise Exception(f'Failed to shrink partition size of {loop_device}p2 with fdisk')
partprobe(loop_device)
logging.debug(f'Finding end sector of partition at {loop_device}p2')
result = subprocess.run(['fdisk', '-b', str(sector_size), '-l', loop_device], capture_output=True)
if result.returncode != 0:
print(result.stdout)
print(result.stderr)
raise Exception(f'Failed to fdisk -l {loop_device}')
end_sector = 0
for line in result.stdout.decode('utf-8').split('\n'):
if line.startswith(f'{loop_device}p2'):
parts = list(filter(lambda part: part != '', line.split(' ')))
end_sector = int(parts[2])
if end_sector == 0:
raise Exception(f'Failed to find end sector of {loop_device}p2')
end_size = (end_sector + 1) * sector_size
logging.debug(f'({end_sector} + 1) sectors * {sector_size} bytes/sector = {end_size} bytes')
logging.info(f'Truncating {file} to {end_size} bytes')
result = subprocess.run(['truncate', '-s', str(end_size), file])
if result.returncode != 0:
raise Exception(f'Failed to truncate {file}')
partprobe(loop_device)
def get_device_and_flavour(profile_name: Optional[str] = None) -> tuple[str, str]:
config.enforce_config_loaded()
profile = config.get_profile(profile_name)
if not profile['device']:
raise Exception("Please set the device using 'kupferbootstrap config init ...'")
if not profile['flavour']:
raise Exception("Please set the flavour using 'kupferbootstrap config init ...'")
return (profile['device'], profile['flavour'])
def get_image_name(device, flavour, img_type='full') -> str:
return f'{device}-{flavour}-{img_type}.img'
def get_image_path(device, flavour, img_type='full') -> str:
return os.path.join(config.get_path('images'), get_image_name(device, flavour, img_type))
def losetup_rootfs_image(image_path: str, sector_size: int) -> str:
logging.debug(f'Creating loop device for {image_path} with sector size {sector_size}')
result = subprocess.run([
'losetup',
'-f',
'-b',
str(sector_size),
'-P',
image_path,
])
if result.returncode != 0:
logging.fatal(f'Failed to create loop device for {image_path}')
exit(1)
logging.debug(f'Finding loop device for {image_path}')
result = subprocess.run(['losetup', '-J'], capture_output=True)
if result.returncode != 0:
print(result.stdout)
print(result.stderr)
logging.fatal('Failed to list loop devices')
exit(1)
data = json.loads(result.stdout.decode('utf-8'))
loop_device = ''
for d in data['loopdevices']:
if d['back-file'] == image_path:
loop_device = d['name']
break
if loop_device == '':
raise Exception(f'Failed to find loop device for {image_path}')
partprobe(loop_device)
def losetup_destroy():
logging.debug(f'Destroying loop device {loop_device} for {image_path}')
subprocess.run(
[
'losetup',
'-d',
loop_device,
],
stderr=subprocess.DEVNULL,
)
atexit.register(losetup_destroy)
return loop_device
def mount_chroot(rootfs_source: str, boot_src: str, chroot: DeviceChroot):
logging.debug(f'Mounting {rootfs_source} at {chroot.path}')
chroot.mount_rootfs(rootfs_source)
assert (os.path.ismount(chroot.path))
os.makedirs(chroot.get_path('boot'), exist_ok=True)
logging.debug(f'Mounting {boot_src} at {chroot.path}/boot')
chroot.mount(boot_src, '/boot', options=['defaults'])
def dump_aboot(image_path: str) -> str:
path = '/tmp/aboot.img'
result = subprocess.run([
'debugfs',
image_path,
'-R',
f'dump /aboot.img {path}',
])
if result.returncode != 0:
logging.fatal('Failed to dump aboot.img')
exit(1)
return path
def dump_lk2nd(image_path: str) -> str:
"""
This doesn't append the image with the appended DTB which is needed for some devices, so it should get added in the future.
"""
path = '/tmp/lk2nd.img'
result = subprocess.run([
'debugfs',
image_path,
'-R',
f'dump /lk2nd.img {path}',
])
if result.returncode != 0:
logging.fatal('Failed to dump lk2nd.img')
exit(1)
return path
def dump_qhypstub(image_path: str) -> str:
path = '/tmp/qhypstub.bin'
result = subprocess.run([
'debugfs',
image_path,
'-R',
f'dump /qhypstub.bin {path}',
])
if result.returncode != 0:
logging.fatal('Failed to dump qhypstub.bin')
exit(1)
return path
def create_img_file(image_path: str, size_str: str):
result = subprocess.run([
'truncate',
'-s',
size_str,
image_path,
])
if result.returncode != 0:
raise Exception(f'Failed to allocate {image_path}')
return image_path
def partition_device(device: str):
boot_partition_size = '100MiB'
create_partition_table = ['mklabel', 'msdos']
create_boot_partition = ['mkpart', 'primary', 'ext2', '0%', boot_partition_size]
create_root_partition = ['mkpart', 'primary', boot_partition_size, '100%']
enable_boot = ['set', '1', 'boot', 'on']
result = subprocess.run([
'parted',
'--script',
device,
] + create_partition_table + create_boot_partition + create_root_partition + enable_boot)
if result.returncode != 0:
raise Exception(f'Failed to create partitions on {device}')
def create_filesystem(device: str, blocksize: int = 4096, label=None, options=[], fstype='ext4'):
# blocksize can be 4k max due to pagesize
blocksize = min(blocksize, 4096)
if fstype.startswith('ext'):
# blocksize for ext-fs must be >=1024
blocksize = max(blocksize, 1024)
labels = ['-L', label] if label else []
cmd = [
f'mkfs.{fstype}',
'-F',
'-b',
str(blocksize),
] + labels + [device]
result = subprocess.run(cmd)
if result.returncode != 0:
raise Exception(f'Failed to create {fstype} filesystem on {device} with CMD: {cmd}')
def create_root_fs(device: str, blocksize: int):
create_filesystem(device, blocksize=blocksize, label='kupfer_root', options=['-O', '^metadata_csum', '-N', '100000'])
def create_boot_fs(device: str, blocksize: int):
create_filesystem(device, blocksize=blocksize, label='kupfer_boot', fstype='ext2')
def install_rootfs(
rootfs_device: str,
bootfs_device: str,
device: str,
flavour: str,
arch: Arch,
packages: list[str],
use_local_repos: bool,
profile: Profile,
):
user = profile['username'] or 'kupfer'
post_cmds = FLAVOURS[flavour].get('post_cmds', [])
chroot = get_device_chroot(device=device, flavour=flavour, arch=arch, packages=packages, use_local_repos=use_local_repos)
mount_chroot(rootfs_device, bootfs_device, chroot)
chroot.mount_pacman_cache()
chroot.initialize()
chroot.activate()
chroot.create_user(
user=user,
password=profile['password'],
)
copy_ssh_keys(
chroot.path,
user=user,
)
files = {
'etc/pacman.conf': get_base_distro(arch).get_pacman_conf(check_space=True, extra_repos=get_kupfer_https(arch).repos),
'etc/sudoers.d/wheel': "# allow members of group wheel to execute any command\n%wheel ALL=(ALL:ALL) ALL\n",
'etc/hostname': profile['hostname'],
}
for target, content in files.items():
with open(os.path.join(chroot.path, target.lstrip('/')), 'w') as file:
file.write(content)
if post_cmds:
result = chroot.run_cmd(' && '.join(post_cmds))
assert isinstance(result, subprocess.CompletedProcess)
if result.returncode != 0:
raise Exception('Error running post_cmds')
logging.info('Preparing to unmount chroot')
res = chroot.run_cmd('sync && umount /boot', attach_tty=True)
logging.debug(f'rc: {res}')
chroot.deactivate()
logging.debug(f'Unmounting rootfs at "{chroot.path}"')
res = run(['umount', chroot.path])
logging.debug(f'rc: {res.returncode}')
@click.group(name='image')
def cmd_image():
"""Build and manage device images"""
@cmd_image.command(name='build')
@click.argument('profile_name', required=False)
@click.option('--local-repos/--no-local-repos',
'-l/-L',
default=True,
show_default=True,
help='Whether to use local package repos at all or only use HTTPS repos.')
@click.option('--build-pkgs/--no-build-pkgs',
'-p/-P',
default=True,
show_default=True,
help='Whether to build missing/outdated local packages if local repos are enabled.')
@click.option('--no-download-pkgs',
is_flag=True,
default=False,
help='Disable trying to download packages instead of building if building is enabled.')
@click.option('--block-target', type=click.Path(), default=None, help='Override the block device file to write the final image to')
@click.option('--skip-part-images',
is_flag=True,
default=False,
help='Skip creating image files for the partitions and directly work on the target block device.')
def cmd_build(profile_name: str = None,
local_repos: bool = True,
build_pkgs: bool = True,
no_download_pkgs=False,
block_target: str = None,
skip_part_images: bool = False):
"""
Build a device image.
Unless overriden, required packages will be built or preferably downloaded from HTTPS repos.
"""
enforce_wrap()
profile: Profile = config.get_profile(profile_name)
device, flavour = get_device_and_flavour(profile_name)
size_extra_mb: int = int(profile["size_extra_mb"])
# TODO: PARSE DEVICE ARCH AND SECTOR SIZE
arch = 'aarch64'
sector_size = 4096
rootfs_size_mb = FLAVOURS[flavour].get('size', 2) * 1000
packages = BASE_PACKAGES + DEVICES[device] + FLAVOURS[flavour]['packages'] + profile['pkgs_include']
if arch != config.runtime['arch']:
build_enable_qemu_binfmt(arch)
if local_repos and build_pkgs:
logging.info("Making sure all packages are built")
repo = discover_packages()
build_packages(repo, [p for name, p in repo.items() if name in packages], arch, try_download=not no_download_pkgs)
image_path = block_target or get_image_path(device, flavour)
os.makedirs(os.path.dirname(image_path), exist_ok=True)
logging.info(f'Creating new file at {image_path}')
create_img_file(image_path, f"{rootfs_size_mb + size_extra_mb}M")
loop_device = losetup_rootfs_image(image_path, sector_size)
partition_device(loop_device)
partprobe(loop_device)
boot_dev: str
root_dev: str
loop_boot = loop_device + 'p1'
loop_root = loop_device + 'p2'
if skip_part_images:
boot_dev = loop_boot
root_dev = loop_root
else:
logging.info('Creating per-partition image files')
boot_dev = create_img_file(get_image_path(device, flavour, 'boot'), IMG_FILE_BOOT_DEFAULT_SIZE)
root_dev = create_img_file(get_image_path(device, flavour, 'root'), f'{rootfs_size_mb + size_extra_mb - 200}M')
create_boot_fs(boot_dev, sector_size)
create_root_fs(root_dev, sector_size)
install_rootfs(
root_dev,
boot_dev,
device,
flavour,
arch,
packages,
local_repos,
profile,
)
if not skip_part_images:
logging.info('Copying partition image files into full image:')
logging.info(f'Block-copying /boot to {image_path}')
dd_image(input=boot_dev, output=loop_boot)
logging.info(f'Block-copying rootfs to {image_path}')
dd_image(input=root_dev, output=loop_root)
logging.info(f'Done! Image saved to {image_path}')
@cmd_image.command(name='inspect')
@click.option('--shell', '-s', is_flag=True)
@click.argument('profile', required=False)
def cmd_inspect(profile: str = None, shell: bool = False):
"""Open a shell in a device image"""
enforce_wrap()
device, flavour = get_device_and_flavour(profile)
# TODO: get arch from profile
arch = 'aarch64'
# TODO: PARSE DEVICE SECTOR SIZE
sector_size = 4096
chroot = get_device_chroot(device, flavour, arch)
image_path = get_image_path(device, flavour)
loop_device = losetup_rootfs_image(image_path, sector_size)
partprobe(loop_device)
mount_chroot(loop_device + 'p2', loop_device + 'p1', chroot)
logging.info(f'Inspect the rootfs image at {chroot.path}')
if shell:
chroot.initialized = True
chroot.activate()
if arch != config.runtime['arch']:
logging.info('Installing requisites for foreign-arch shell')
build_enable_qemu_binfmt(arch)
logging.info('Starting inspection shell')
chroot.run_cmd('/bin/bash')
else:
pause()

integration_tests.py Normal file

@@ -0,0 +1,101 @@
import click
import os
import pytest
from glob import glob
from subprocess import CompletedProcess
from kupferbootstrap.config.state import config, CONFIG_DEFAULTS
from kupferbootstrap.constants import SRCINFO_METADATA_FILE
from kupferbootstrap.exec.cmd import run_cmd
from kupferbootstrap.exec.file import get_temp_dir
from kupferbootstrap.logger import setup_logging
from kupferbootstrap.packages.cli import SRCINFO_CACHE_FILES, cmd_build, cmd_clean, cmd_init, cmd_update
from kupferbootstrap.utils import git_get_branch
tempdir = None
config.try_load_file()
setup_logging(True)
PKG_TEST_PATH = 'device/device-sdm845-oneplus-enchilada'
PKG_TEST_NAME = 'device-sdm845-xiaomi-beryllium-ebbg'
@pytest.fixture()
def ctx() -> click.Context:
global tempdir
if not tempdir:
tempdir = get_temp_dir()
if not os.environ.get('INTEGRATION_TESTS_USE_GLOBAL_CONFIG', 'false').lower() == 'true':
config.file.paths.update(CONFIG_DEFAULTS.paths | {'cache_dir': tempdir})
config_path = os.path.join(tempdir, 'kupferbootstrap.toml')
config.runtime.config_file = config_path
if not os.path.exists(config_path):
config.write()
config.try_load_file(config_path)
print(f'cache_dir: {config.file.paths.cache_dir}')
return click.Context(click.Command('integration_tests'))
def test_main_import():
from kupferbootstrap.main import cli
assert cli
def test_config_load(ctx: click.Context):
path = config.runtime.config_file
assert path
assert path.startswith('/tmp/')
assert os.path.exists(path)
config.enforce_config_loaded()
def test_packages_update(ctx: click.Context):
pkgbuilds_path = config.get_path('pkgbuilds')
assert config.runtime.script_source_dir
kbs_branch = git_get_branch(os.path.join(config.runtime.script_source_dir, "../.."))
# Gitlab CI integration: the CI checks out a detached commit, branch comes back empty.
if not kbs_branch and os.environ.get('CI', 'false') == 'true':
kbs_branch = os.environ.get('CI_COMMIT_BRANCH', '')
branches: dict[str, bool] = {'main': False, 'dev': False}
if kbs_branch:
branches[kbs_branch] = True
for branch, may_fail in branches.items():
config.file.pkgbuilds.git_branch = branch
try:
ctx.invoke(cmd_init, update=True, non_interactive=True, switch_branch=True, discard_changes=True, init_caches=False)
except Exception as ex:
print(f'may_fail: {may_fail}; Exception: {ex}')
if not may_fail:
raise ex
# check branch really doesn't exist
res = run_cmd(f"git ls-remote {CONFIG_DEFAULTS.pkgbuilds.git_repo} 'refs/heads/*' | grep 'refs/heads/{branch}'")
assert isinstance(res, CompletedProcess)
assert res.returncode != 0
continue
assert git_get_branch(pkgbuilds_path) == branch
def test_packages_clean(ctx: click.Context):
if not glob(os.path.join(config.get_path('pkgbuilds'), '*', '*', SRCINFO_METADATA_FILE)):
ctx.invoke(cmd_update, non_interactive=True)
ctx.invoke(cmd_clean, what=['git'], force=True)
def test_packages_cache_init(ctx: click.Context):
ctx.invoke(cmd_update, non_interactive=True, switch_branch=False, discard_changes=False, init_caches=True)
for f in SRCINFO_CACHE_FILES:
assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), PKG_TEST_PATH, f))
def build_pkgs(_ctx: click.Context, query: list[str], arch: str = 'aarch64', **kwargs):
_ctx.invoke(cmd_build, paths=query, arch=arch, **kwargs)
def test_packages_build_by_path(ctx: click.Context):
build_pkgs(ctx, [PKG_TEST_PATH], force=True)
def test_split_package_build_by_name(ctx: click.Context):
build_pkgs(ctx, [PKG_TEST_NAME])

local/bin/wrapper_su_helper Symbolic link

@@ -0,0 +1 @@
../../wrapper_su_helper.py


@@ -1,32 +0,0 @@
#!/bin/sh
set -e
wget https://raw.githubusercontent.com/archlinuxarm/PKGBUILDs/master/core/pacman/makepkg.conf -O etc/makepkg.conf
sed -i "s/@CARCH@/aarch64/g" etc/makepkg.conf
sed -i "s/@CHOST@/aarch64-unknown-linux-gnu/g" etc/makepkg.conf
sed -i "s/@CARCHFLAGS@/-march=armv8-a /g" etc/makepkg.conf
sed -i "s/xz /xz -T0 /g" etc/makepkg.conf
sed -i "s/ check / !check /g" etc/makepkg.conf
chroot="/chroot/base_aarch64"
include="-I\${CROOT}/usr/include -I$chroot/usr/include"
lib_croot="\${CROOT}/lib"
lib_chroot="$chroot/usr/lib"
cat >>etc/makepkg.conf <<EOF
export CROOT="/usr/aarch64-linux-gnu"
export ARCH="arm64"
export CROSS_COMPILE="aarch64-linux-gnu-"
export CC="aarch64-linux-gnu-gcc $include -L$lib_croot -L$lib_chroot"
export CXX="aarch64-linux-gnu-g++ $include -L$lib_croot -L$lib_chroot"
export CFLAGS="\$CFLAGS $include"
export CXXFLAGS="\$CXXFLAGS $include"
export LDFLAGS="\$LDFLAGS,-L$lib_croot,-L$lib_chroot,-rpath-link,$lib_croot,-rpath-link,$lib_chroot"
export PACMAN_CHROOT="$chroot"
EOF
# TODO: Set PACKAGER
wget https://raw.githubusercontent.com/archlinuxarm/PKGBUILDs/master/core/pacman/pacman.conf -O etc/pacman.conf
sed -i "s/@CARCH@/aarch64/g" etc/pacman.conf
sed -i "s/#ParallelDownloads.*/ParallelDownloads = 8/g" etc/pacman.conf
sed -i "s/SigLevel.*/SigLevel = Never/g" etc/pacman.conf
sed -i "s/^CheckSpace/#CheckSpace/g" etc/pacman.conf
sed -i "s|Include = /etc/pacman.d/mirrorlist|Server = http://mirror.archlinuxarm.org/\$arch/\$repo|g" etc/pacman.conf


@@ -1,27 +0,0 @@
import click
import coloredlogs
import logging
import sys
def setup_logging(verbose: bool):
level_colors = coloredlogs.DEFAULT_LEVEL_STYLES | {'info': {'color': 'magenta', 'bright': True}, 'debug': {'color': 'blue', 'bright': True}}
field_colors = coloredlogs.DEFAULT_FIELD_STYLES | {'asctime': {'color': 'white', 'faint': True}}
level = logging.DEBUG if verbose else logging.INFO
coloredlogs.install(
stream=sys.stdout,
fmt='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=level,
level_styles=level_colors,
field_styles=field_colors,
)
logging.debug('Logging set up.')
verbose_option = click.option(
'-v',
'--verbose',
is_flag=True,
help='Enables verbose logging',
)

main.py

@@ -1,60 +0,0 @@
#!/usr/bin/env python3
import click
from traceback import format_exc as get_trace
import subprocess
from logger import logging, setup_logging, verbose_option
from wrapper import nowrapper_option
from config import config, config_option, cmd_config
from forwarding import cmd_forwarding
from packages import cmd_packages
from telnet import cmd_telnet
from chroot import cmd_chroot
from cache import cmd_cache
from image import cmd_image
from boot import cmd_boot
from flash import cmd_flash
from ssh import cmd_ssh
@click.group()
@click.option('--error-shell', '-E', 'error_shell', is_flag=True, default=False, help='Spawn shell after error occurs')
@verbose_option
@config_option
@nowrapper_option
def cli(verbose: bool = False, config_file: str = None, no_wrapper: bool = False, error_shell: bool = False):
setup_logging(verbose)
config.runtime['verbose'] = verbose
config.runtime['no_wrap'] = no_wrapper
config.runtime['error_shell'] = error_shell
config.try_load_file(config_file)
def main():
try:
return cli(prog_name='kupferbootstrap')
except Exception as ex:
if config.runtime['verbose']:
logging.fatal(get_trace())
else:
logging.fatal(ex)
if config.runtime['error_shell']:
logging.info('Starting error shell. Type exit to quit.')
subprocess.call('/bin/bash')
exit(1)
cli.add_command(cmd_config)
cli.add_command(cmd_cache)
cli.add_command(cmd_packages)
cli.add_command(cmd_image)
cli.add_command(cmd_boot)
cli.add_command(cmd_flash)
cli.add_command(cmd_ssh)
cli.add_command(cmd_forwarding)
cli.add_command(cmd_telnet)
cli.add_command(cmd_chroot)
if __name__ == '__main__':
main()

File diff suppressed because it is too large.


@@ -1,105 +0,0 @@
from copy import deepcopy
import os
import subprocess
from chroot import Chroot
from constants import CHROOT_PATHS, MAKEPKG_CMD
from distro.package import PackageInfo
class Pkgbuild(PackageInfo):
depends: list[str]
provides: list[str]
replaces: list[str]
local_depends: list[str]
repo = ''
mode = ''
path = ''
pkgver = ''
pkgrel = ''
def __init__(
self,
relative_path: str,
depends: list[str] = [],
provides: list[str] = [],
replaces: list[str] = [],
) -> None:
self.version = ''
self.path = relative_path
self.depends = deepcopy(depends)
self.provides = deepcopy(provides)
self.replaces = deepcopy(replaces)
def __repr__(self):
return f'Pkgbuild({self.name},{repr(self.path)},{self.version},{self.mode})'
def names(self):
return list(set([self.name] + self.provides + self.replaces))
class Pkgbase(Pkgbuild):
subpackages: list[Pkgbuild]
def __init__(self, relative_path: str, subpackages: list[Pkgbuild] = [], **args):
self.subpackages = deepcopy(subpackages)
super().__init__(relative_path, **args)
def parse_pkgbuild(relative_pkg_dir: str, native_chroot: Chroot) -> list[Pkgbuild]:
mode = None
with open(os.path.join(native_chroot.get_path(CHROOT_PATHS['pkgbuilds']), relative_pkg_dir, 'PKGBUILD'), 'r') as file:
for line in file.read().split('\n'):
if line.startswith('_mode='):
mode = line.split('=')[1]
break
if mode not in ['host', 'cross']:
raise Exception((f'{relative_pkg_dir}/PKGBUILD has {"no" if mode is None else "an invalid"} mode configured') +
(f': "{mode}"' if mode is not None else ''))
base_package = Pkgbase(relative_pkg_dir)
base_package.mode = mode
base_package.repo = relative_pkg_dir.split('/')[0]
srcinfo = native_chroot.run_cmd(
MAKEPKG_CMD + ['--printsrcinfo'],
cwd=os.path.join(CHROOT_PATHS['pkgbuilds'], base_package.path),
stdout=subprocess.PIPE,
)
assert (isinstance(srcinfo, subprocess.CompletedProcess))
lines = srcinfo.stdout.decode('utf-8').split('\n')
current = base_package
multi_pkgs = False
for line_raw in lines:
line = line_raw.strip()
if not line:
continue
splits = line.split(' = ')
if line.startswith('pkgbase'):
base_package.name = splits[1]
multi_pkgs = True
elif line.startswith('pkgname'):
if multi_pkgs:
current = deepcopy(base_package)
base_package.subpackages.append(current)
current.name = splits[1]
elif line.startswith('pkgver'):
current.pkgver = splits[1]
elif line.startswith('pkgrel'):
current.pkgrel = splits[1]
elif line.startswith('provides'):
current.provides.append(splits[1])
elif line.startswith('replaces'):
current.replaces.append(splits[1])
elif line.startswith('depends') or line.startswith('makedepends') or line.startswith('checkdepends') or line.startswith('optdepends'):
current.depends.append(splits[1].split('=')[0].split(': ')[0])
current.depends = list(set(current.depends))
results = base_package.subpackages or [base_package]
for pkg in results:
pkg.version = f'{pkg.pkgver}-{pkg.pkgrel}'
if not (pkg.pkgver == base_package.pkgver and pkg.pkgrel == base_package.pkgrel):
raise Exception('subpackage malformed! pkgver differs!')
return results

pyproject.toml Normal file

@@ -0,0 +1,29 @@
[project]
name = "kupferbootstrap"
dependencies = [
"click>=8.0.1",
"appdirs>=1.4.4",
"joblib>=1.0.1",
"toml",
"typing_extensions",
"coloredlogs",
"munch",
"requests",
"python-dateutil",
"enlighten",
"PyYAML",
]
dynamic = ["version"]
[project.scripts]
kupferbootstrap = "kupferbootstrap.main:main"
[tool.setuptools.package-data]
"*" = ["version.txt"]
[build-system]
requires = [ "setuptools>=41", "wheel", "setuptools-git-versioning<2", ]
build-backend = "setuptools.build_meta"
[tool.setuptools-git-versioning]
enabled = true

pytest.sh Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
sudo -v
python -m pytest -v --cov=. --cov-branch --cov-report=term "$@" src/kupferbootstrap


@@ -1,6 +1 @@
click>=8.0.1
appdirs>=1.4.4
joblib>=1.0.1
toml
typing_extensions
coloredlogs
-e .


@@ -0,0 +1,125 @@
# modifed from pmbootstrap's binfmt.py, Copyright 2018 Oliver Smith, GPL-licensed
import os
import logging
from typing import Optional
from kupferbootstrap.chroot.abstract import Chroot
from kupferbootstrap.constants import Arch, QEMU_ARCHES
from kupferbootstrap.exec.cmd import run_root_cmd, CompletedProcess
from kupferbootstrap.utils import mount
def binfmt_info(chroot: Optional[Chroot] = None):
# Parse the info file
full = {}
info = "/usr/lib/binfmt.d/qemu-static.conf"
if chroot:
info = chroot.get_path(info)
logging.debug("parsing: " + info)
with open(info, "r") as handle:
for line in handle:
if line.startswith('#') or ":" not in line:
continue
splitted = line.split(":")
result = {
# _ = splitted[0] # empty
'name': splitted[1],
'type': splitted[2],
'offset': splitted[3],
'magic': splitted[4],
'mask': splitted[5],
'interpreter': splitted[6],
'flags': splitted[7],
'line': line,
}
if not result['name'].startswith('qemu-'):
logging.fatal(f'Unknown binfmt handler "{result["name"]}"')
logging.debug(f'binfmt line: {line}')
continue
arch = ''.join(result['name'].split('-')[1:])
full[arch] = result
return full
def is_arch_known(arch: Arch, raise_exception: bool = False, action: Optional[str] = None) -> bool:
if arch not in QEMU_ARCHES:
if raise_exception:
raise Exception(f'binfmt{f".{action}()" if action else ""}: unknown arch {arch} (not in QEMU_ARCHES)')
return False
return True
def binfmt_is_registered(arch: Arch, chroot: Optional[Chroot] = None) -> bool:
is_arch_known(arch, True, 'is_registered')
qemu_arch = QEMU_ARCHES[arch]
path = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch
binfmt_ensure_mounted(chroot)
if chroot:
path = chroot.get_path(path)
return os.path.exists(path)
def binfmt_ensure_mounted(chroot: Optional[Chroot] = None):
binfmt_path = '/proc/sys/fs/binfmt_misc'
register_path = binfmt_path + '/register'
if chroot:
register_path = chroot.get_path(register_path)
if not os.path.exists(register_path):
logging.info('mounting binfmt_misc')
result = (chroot.mount if chroot else mount)('binfmt_misc', binfmt_path, options=[], fs_type='binfmt_misc') # type: ignore[operator]
if (isinstance(result, CompletedProcess) and result.returncode != 0) or not result:
raise Exception(f'Failed mounting binfmt_misc to {binfmt_path}')
def binfmt_register(arch: Arch, chroot: Optional[Chroot] = None):
binfmt_path = '/proc/sys/fs/binfmt_misc'
register_path = binfmt_path + '/register'
is_arch_known(arch, True, 'register')
qemu_arch = QEMU_ARCHES[arch]
if binfmt_is_registered(arch, chroot=chroot):
return
lines = binfmt_info(chroot=chroot)
_runcmd = run_root_cmd
if chroot:
_runcmd = chroot.run_cmd
chroot.activate()
binfmt_ensure_mounted(chroot)
# Build registration string
# https://en.wikipedia.org/wiki/Binfmt_misc
# :name:type:offset:magic:mask:interpreter:flags
info = lines[qemu_arch]
code = info['line']
if arch == os.uname().machine:
logging.fatal("Attempted to register qemu binfmt for host architecture, skipping!")
return
# Register in binfmt_misc
logging.info(f"Registering qemu binfmt ({arch})")
_runcmd(f'echo "{code}" > "{register_path}" 2>/dev/null') # use path without chroot path prefix
if not binfmt_is_registered(arch, chroot=chroot):
logging.debug(f'binfmt line: {code}')
raise Exception(f'Failed to register qemu-user for {arch} with binfmt_misc, {binfmt_path}/{info["name"]} not found')
def binfmt_unregister(arch, chroot: Optional[Chroot] = None):
is_arch_known(arch, True, 'unregister')
qemu_arch = QEMU_ARCHES[arch]
binfmt_ensure_mounted(chroot)
binfmt_file = "/proc/sys/fs/binfmt_misc/qemu-" + qemu_arch
if chroot:
binfmt_file = chroot.get_path(binfmt_file)
if not os.path.exists(binfmt_file):
logging.debug(f"qemu binfmt for {arch} not registered")
return
logging.info(f"Unregistering qemu binfmt ({arch})")
run_root_cmd(f"echo -1 > {binfmt_file}")
if binfmt_is_registered(arch, chroot=chroot):
raise Exception(f'Failed to UNregister qemu-user for {arch} with binfmt_misc, {chroot=}')


@@ -0,0 +1,44 @@
import click
import os
from typing import Optional
from kupferbootstrap.constants import Arch, ARCHES
from .binfmt import binfmt_unregister, binfmt_is_registered
cmd_binfmt = click.Group('binfmt', help='Manage qemu binfmt for executing foreign architecture binaries')
arches_arg = click.argument('arches', type=click.Choice(ARCHES), nargs=-1, required=True)
arches_arg_optional = click.argument('arches', type=click.Choice(ARCHES), nargs=-1, required=False)
@cmd_binfmt.command('register', help='Register a binfmt handler with the kernel')
@arches_arg
def cmd_register(arches: list[Arch], disable_chroot: bool = False):
from ..packages.build import build_enable_qemu_binfmt
for arch in arches:
build_enable_qemu_binfmt(arch)
@cmd_binfmt.command('unregister', help='Unregister a binfmt handler from the kernel')
@arches_arg_optional
def cmd_unregister(arches: Optional[list[Arch]]):
for arch in arches or ARCHES:
binfmt_unregister(arch)
@cmd_binfmt.command('status', help='Get the status of a binfmt handler from the kernel')
@arches_arg_optional
def cmd_status(arches: Optional[list[Arch]]):
for arch in arches or ARCHES:
native = arch == os.uname().machine
active = binfmt_is_registered(arch)
if native and not active:
# boooring
continue
verb = click.style(
"is" if active else "is NOT",
fg='green' if (active ^ native) else 'red',
bold=True,
)
click.echo(f'Binfmt for {arch} {verb} set up! {"(host architecture!)" if native else ""}')

src/kupferbootstrap/cache/__init__.py vendored Normal file

src/kupferbootstrap/cache/cli.py vendored Normal file

@@ -0,0 +1,51 @@
import click
import os
import logging
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import CHROOT_PATHS
from kupferbootstrap.exec.file import remove_file
from kupferbootstrap.packages.cli import cmd_clean as cmd_clean_pkgbuilds
from kupferbootstrap.wrapper import enforce_wrap
PATHS = list(CHROOT_PATHS.keys())
@click.group(name='cache')
def cmd_cache():
"""Clean various cache directories"""
@cmd_cache.command(name='clean')
@click.option('--force', is_flag=True, default=False, help="Don't ask for any confirmation")
@click.option('-n', '--noop', is_flag=True, default=False, help="Print what would be removed but dont execute")
@click.argument('paths', nargs=-1, type=click.Choice(['all'] + PATHS), required=False)
@click.pass_context
def cmd_clean(ctx: click.Context, paths: list[str], force: bool = False, noop: bool = False):
"""Clean various working directories"""
if unknown_paths := (set(paths) - set(PATHS + ['all'])):
raise Exception(f"Unknown paths: {' ,'.join(unknown_paths)}")
if 'all' in paths or (not paths and force):
paths = PATHS.copy()
enforce_wrap()
clear = {path: (path in paths) for path in PATHS}
query = not paths
if not query and not force:
click.confirm(f'Really clear {", ".join(paths)}?', abort=True)
for path_name in PATHS:
if query and not force:
clear[path_name] = click.confirm(f'{"(Noop) " if noop else ""}Clear {path_name}?')
if clear[path_name]:
logging.info(f'Clearing {path_name}')
if path_name == 'pkgbuilds':
ctx.invoke(cmd_clean_pkgbuilds, force=force, noop=noop)
continue
dir = config.get_path(path_name)
for file in os.listdir(dir):
path = os.path.join(dir, file)
log = logging.info if noop else logging.debug
log(f'{"Would remove" if noop else "Removing"} "{path_name}/{file}"')
if not noop:
remove_file(path, recursive=True)


@@ -2,16 +2,20 @@ import atexit
import logging
import os
import subprocess
import sys
from copy import deepcopy
from shlex import quote as shell_quote
from typing import Protocol, Union, Optional, Mapping
from typing import ClassVar, Iterable, Protocol, Union, Optional, Mapping
from uuid import uuid4
from config import config
from constants import Arch, CHROOT_PATHS
from distro.distro import get_base_distro, get_kupfer_local, RepoInfo
from generator import generate_makepkg_conf
from utils import mount, umount, check_findmnt, log_or_exception
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import Arch, CHROOT_PATHS, GCC_HOSTSPECS
from kupferbootstrap.distro.distro import get_base_distro, get_kupfer_local, RepoInfo
from kupferbootstrap.exec.cmd import FileDescriptor, run_root_cmd, generate_env_cmd, flatten_shell_script, wrap_in_bash, generate_cmd_su
from kupferbootstrap.exec.file import makedir, root_makedir, root_write_file, write_file
from kupferbootstrap.generator import generate_makepkg_conf
from kupferbootstrap.utils import mount, umount, check_findmnt, log_or_exception
from .helpers import BASE_CHROOT_PREFIX, BASIC_MOUNTS, base_chroot_name, make_abs_path
@@ -32,10 +36,9 @@ class AbstractChroot(Protocol):
name: str,
arch: Arch,
copy_base: bool,
initialize: bool,
extra_repos: Mapping[str, RepoInfo],
base_packages: list[str],
path_override: str = None,
path_override: Optional[str] = None,
):
pass
@@ -57,7 +60,8 @@ class AbstractChroot(Protocol):
capture_output: bool,
cwd: str,
fail_inactive: bool,
stdout: Optional[int],
stdout: Optional[FileDescriptor],
stderr: Optional[FileDescriptor],
):
pass
@@ -76,6 +80,9 @@ class AbstractChroot(Protocol):
class Chroot(AbstractChroot):
_copy_base: ClassVar[bool] = False
copy_base: bool
def __repr__(self):
return f'Chroot({self.name})'
@@ -83,11 +90,10 @@ class Chroot(AbstractChroot):
self,
name: str,
arch: Arch,
copy_base: bool = None,
initialize: bool = False,
copy_base: Optional[bool] = None,
extra_repos: Mapping[str, RepoInfo] = {},
base_packages: list[str] = ['base', 'base-devel', 'git'],
path_override: str = None,
path_override: Optional[str] = None,
):
self.uuid = uuid4()
if copy_base is None:
@@ -99,11 +105,9 @@ class Chroot(AbstractChroot):
self.name = name
self.arch = arch
self.path = path_override or os.path.join(config.get_path('chroots'), name)
self.copy_base = copy_base
self.copy_base = copy_base if copy_base is not None else self._copy_base
self.extra_repos = deepcopy(extra_repos)
self.base_packages = base_packages.copy()
if initialize:
self.initialize()
if self.name.startswith(BASE_CHROOT_PREFIX) and set(get_kupfer_local(self.arch).repos).intersection(set(self.extra_repos)):
raise Exception(f'Base chroot {self.name} had local repos specified: {self.extra_repos}')
@@ -127,6 +131,7 @@ class Chroot(AbstractChroot):
def get_path(self, *joins: str) -> str:
if joins:
# no need to check for len(joins) > 1 because [1:] will just return []
joins = (joins[0].lstrip('/'),) + joins[1:]
return os.path.join(self.path, *joins)
@@ -136,9 +141,9 @@ class Chroot(AbstractChroot):
absolute_source: str,
relative_destination: str,
options=['bind'],
fs_type: str = None,
fs_type: Optional[str] = None,
fail_if_mounted: bool = True,
makedir: bool = True,
mkdir: bool = True,
strict_cache_consistency: bool = False,
):
"""returns the absolute path `relative_target` was mounted at"""
@@ -158,8 +163,8 @@ class Chroot(AbstractChroot):
else:
if pseudo_absolute in self.active_mounts:
log_or_exc(f'{self.name}: Mount {pseudo_absolute} was in active_mounts but not actually mounted. ({absolute_destination})')
if makedir and os.path.isdir(absolute_source):
os.makedirs(absolute_destination, exist_ok=True)
if mkdir and os.path.isdir(absolute_source):
root_makedir(absolute_destination)
result = mount(absolute_source, absolute_destination, options=options, fs_type=fs_type, register_unmount=False)
if result.returncode != 0:
raise Exception(f'{self.name}: failed to mount {absolute_source} to {absolute_destination}')
@@ -177,7 +182,7 @@ class Chroot(AbstractChroot):
self.active_mounts.remove(relative_path)
return result
def umount_many(self, relative_paths: list[str]):
def umount_many(self, relative_paths: Iterable[str]):
# make sure paths start with '/'. Important: also copies the collection and casts to list, which will be sorted!
mounts = [make_abs_path(path) for path in relative_paths]
mounts.sort(reverse=True)
@@ -215,38 +220,37 @@ class Chroot(AbstractChroot):
self,
script: Union[str, list[str]],
inner_env: dict[str, str] = {},
outer_env: dict[str, str] = os.environ.copy() | {'QEMU_LD_PREFIX': '/usr/aarch64-linux-gnu'},
outer_env: dict[str, str] = {},
attach_tty: bool = False,
capture_output: bool = False,
cwd: Optional[str] = None,
fail_inactive: bool = True,
stdout: Optional[int] = None,
stdout: Optional[FileDescriptor] = None,
stderr: Optional[FileDescriptor] = None,
switch_user: Optional[str] = None,
) -> Union[int, subprocess.CompletedProcess]:
if not self.active and fail_inactive:
raise Exception(f'Chroot {self.name} is inactive, not running command! Hint: pass `fail_inactive=False`')
if outer_env is None:
outer_env = os.environ.copy()
env_cmd = ['/usr/bin/env'] + [f'{shell_quote(key)}={shell_quote(value)}' for key, value in inner_env.items()]
kwargs: dict = {
'env': outer_env,
}
if not attach_tty:
kwargs |= {'stdout': stdout} if stdout else {'capture_output': capture_output}
outer_env = {}
native = config.runtime.arch
assert native
if self.arch != native and 'QEMU_LD_PREFIX' not in outer_env:
outer_env = dict(outer_env) # copy dict for modification
outer_env |= {'QEMU_LD_PREFIX': f'/usr/{GCC_HOSTSPECS[native][self.arch]}'}
env_cmd = generate_env_cmd(inner_env) if inner_env else []
if not isinstance(script, str) and isinstance(script, list):
script = ' '.join(script)
script = flatten_shell_script(script, shell_quote_items=False, wrap_in_shell_quote=False)
if cwd:
script = f"cd {shell_quote(cwd)} && ( {script} )"
cmd = ['chroot', self.path] + env_cmd + [
'/bin/bash',
'-c',
script,
]
logging.debug(f'{self.name}: Running cmd: "{cmd}"')
if attach_tty:
return subprocess.call(cmd, **kwargs)
if switch_user:
inner_cmd = generate_cmd_su(script, switch_user=switch_user, elevation_method='none', force_su=True)
else:
return subprocess.run(cmd, **kwargs)
inner_cmd = wrap_in_bash(script, flatten_result=False)
cmd = flatten_shell_script(['chroot', self.path] + env_cmd + inner_cmd, shell_quote_items=True)
return run_root_cmd(cmd, env=outer_env, attach_tty=attach_tty, capture_output=capture_output, stdout=stdout, stderr=stderr)
def mount_pkgbuilds(self, fail_if_mounted: bool = False) -> str:
return self.mount(
@@ -256,12 +260,12 @@ class Chroot(AbstractChroot):
)
def mount_pacman_cache(self, fail_if_mounted: bool = False) -> str:
arch_cache = os.path.join(config.get_path('pacman'), self.arch)
rel_target = os.path.join(CHROOT_PATHS['pacman'].lstrip('/'), self.arch)
for dir in [arch_cache, self.get_path(rel_target)]:
os.makedirs(dir, exist_ok=True)
shared_cache = os.path.join(config.get_path('pacman'), self.arch)
rel_target = 'var/cache/pacman/pkg'
makedir(shared_cache)
root_makedir(self.get_path(rel_target))
return self.mount(
arch_cache,
shared_cache,
rel_target,
fail_if_mounted=fail_if_mounted,
)
@@ -273,6 +277,13 @@ class Chroot(AbstractChroot):
fail_if_mounted=fail_if_mounted,
)
def mount_chroots(self, fail_if_mounted: bool = False) -> str:
return self.mount(
absolute_source=config.get_path('chroots'),
relative_destination=CHROOT_PATHS['chroots'].lstrip('/'),
fail_if_mounted=fail_if_mounted,
)
def write_makepkg_conf(self, target_arch: Arch, cross_chroot_relative: Optional[str], cross: bool = True) -> str:
"""
Generate a `makepkg.conf` or `makepkg_cross_$arch.conf` file in /etc.
@@ -283,61 +294,103 @@ class Chroot(AbstractChroot):
filename = 'makepkg' + (f'_cross_{target_arch}' if cross else '') + '.conf'
makepkg_conf_path_relative = os.path.join('etc', filename)
makepkg_conf_path = os.path.join(self.path, makepkg_conf_path_relative)
with open(makepkg_conf_path, 'w') as f:
f.write(makepkg_cross_conf)
root_makedir(self.get_path('/etc'))
root_write_file(makepkg_conf_path, makepkg_cross_conf)
return makepkg_conf_path_relative
def write_pacman_conf(self, check_space: Optional[bool] = None):
def write_pacman_conf(self, check_space: Optional[bool] = None, in_chroot: bool = True, absolute_path: Optional[str] = None):
user = None
group = None
if check_space is None:
check_space = config.file['pacman']['check_space']
os.makedirs(self.get_path('/etc'), exist_ok=True)
conf_text = get_base_distro(self.arch).get_pacman_conf(self.extra_repos, check_space=check_space)
with open(self.get_path('etc/pacman.conf'), 'w') as file:
file.write(conf_text)
check_space = config.file.pacman.check_space
if not absolute_path:
path = self.get_path('/etc')
root_makedir(path)
absolute_path = os.path.join(path, 'pacman.conf')
user = 'root'
group = 'root'
repos = deepcopy(self.extra_repos)
if not in_chroot:
for repo in repos.values():
repo.url_template = repo.url_template.replace(
f'file://{CHROOT_PATHS["packages"]}',
f'file://{config.get_path("packages")}',
1,
)
conf_text = get_base_distro(self.arch).get_pacman_conf(repos, check_space=check_space, in_chroot=in_chroot)
write_file(absolute_path, conf_text, user=user, group=group)
def create_user(
self,
user='kupfer',
password='123456',
groups=['network', 'video', 'audio', 'optical', 'storage', 'input', 'scanner', 'games', 'lp', 'rfkill', 'wheel'],
user: str = 'kupfer',
password: Optional[str] = None,
groups: list[str] = ['network', 'video', 'audio', 'optical', 'storage', 'input', 'scanner', 'games', 'lp', 'rfkill', 'wheel'],
primary_group: Optional[str] = 'users',
uid: Optional[int] = None,
non_unique: bool = False,
):
user = user or 'kupfer'
uid_param = f'-u {uid}' if uid is not None else ''
unique_param = '--non-unique' if non_unique else ''
pgroup_param = f'-g {primary_group}' if primary_group else ''
install_script = f'''
set -e
if ! id -u "{user}" >/dev/null 2>&1; then
useradd -m {user}
useradd -m {unique_param} {uid_param} {pgroup_param} {user}
fi
usermod -a -G {",".join(groups)} {user}
chown {user}:{user} /home/{user} -R
usermod -a -G {",".join(groups)} {unique_param} {uid_param} {pgroup_param} {user}
chown {user}:{primary_group if primary_group else user} /home/{user} -R
'''
if password:
install_script += f'echo "{user}:{password}" | chpasswd'
else:
install_script += f'echo "Set user password:" && passwd {user}'
result = self.run_cmd(install_script)
assert isinstance(result, subprocess.CompletedProcess)
if result.returncode != 0:
raise Exception('Failed to setup user')
raise Exception(f'Failed to setup user {user} in {self.name}')
def get_uid(self, user: Union[str, int]) -> int:
if isinstance(user, int):
return user
if user == 'root':
return 0
res = self.run_cmd(['id', '-u', user], capture_output=True)
assert isinstance(res, subprocess.CompletedProcess)
if res.returncode or not res.stdout:
raise Exception(f"chroot {self.name}: Couldnt detect uid for user {user}: {repr(res.stdout)}")
uid = res.stdout.decode()
return int(uid)
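# Illustrative usage (hypothetical chroot and user, not part of this diff):
#   uid = chroot.get_uid('kupfer')   # runs `id -u kupfer` inside the chroot, e.g. 1000
#   assert chroot.get_uid('root') == 0   # short-circuits, no subprocess spawned
#   assert chroot.get_uid(1000) == 1000  # ints are passed through unchanged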
def add_sudo_config(self, config_name: str = 'wheel', privilegee: str = '%wheel', password_required: bool = True):
if '.' in config_name:
raise Exception(f"won't create sudoers.d file {config_name} since it will be ignored by sudo because it contains a dot!")
comment = ('# allow ' + (f'members of group {privilegee.strip("%")}' if privilegee.startswith('%') else f'user {privilegee}') +
' to run any program as root' + ('' if password_required else ' without a password'))
line = privilegee + (' ALL=(ALL:ALL) ALL' if password_required else ' ALL=(ALL) NOPASSWD: ALL')
root_write_file(self.get_path(f'/etc/sudoers.d/{config_name}'), f'{comment}\n{line}')
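# Illustrative result, assuming the defaults above (not part of this diff):
#   chroot.add_sudo_config()
#   # writes /etc/sudoers.d/wheel inside the chroot:
#   #   # allow members of group wheel to run any program as root
#   #   %wheel ALL=(ALL:ALL) ALL
#   chroot.add_sudo_config('wheel-nopasswd', password_required=False)
#   # -> '%wheel ALL=(ALL) NOPASSWD: ALL' plus the matching comment line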
def try_install_packages(
self,
packages: list[str],
refresh: bool = False,
allow_fail: bool = True,
redirect_stderr: bool = True,
) -> dict[str, Union[int, subprocess.CompletedProcess]]:
"""Try installing packages, fall back to installing one by one"""
results = {}
stderr = sys.stdout if redirect_stderr else sys.stderr
if refresh:
results['refresh'] = self.run_cmd('pacman -Syy --noconfirm')
results['refresh'] = self.run_cmd('pacman -Syy --noconfirm', stderr=stderr)
cmd = "pacman -S --noconfirm --needed --overwrite='/*'"
result = self.run_cmd(f'{cmd} -y {" ".join(packages)}')
result = self.run_cmd(f'{cmd} -y {" ".join(packages)}', stderr=stderr)
assert isinstance(result, subprocess.CompletedProcess)
results |= {package: result for package in packages}
if result.returncode != 0 and allow_fail:
results = {}
logging.debug('Falling back to serial installation')
for pkg in set(packages):
# Don't check for errors here because there might be packages that are listed as dependencies but are not available on x86_64
results[pkg] = self.run_cmd(f'{cmd} {pkg}')
results[pkg] = self.run_cmd(f'{cmd} {pkg}', stderr=stderr)
return results
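# Sketch of consuming the returned dict (hypothetical package names):
#   results = chroot.try_install_packages(['base', 'nano'], refresh=True)
#   failed = [pkg for pkg, res in results.items()
#             if isinstance(res, subprocess.CompletedProcess) and res.returncode]
#   # on bulk failure with allow_fail=True, entries reflect the serial retries instead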
@@ -346,26 +399,29 @@ chroots: dict[str, Chroot] = {}
def get_chroot(
name: str,
chroot_class: type[Chroot],
chroot_args: dict,
initialize: bool = False,
activate: bool = False,
fail_if_exists: bool = False,
extra_repos: Optional[Mapping[str, RepoInfo]] = None,
default: Chroot = None,
) -> Chroot:
global chroots
if default and name not in chroots:
logging.debug(f'Adding chroot {name} to chroot map: {default.uuid}')
chroots[name] = default
if name not in chroots:
chroot = chroot_class(name, **chroot_args)
logging.debug(f'Adding chroot {name} to chroot map: {chroot.uuid}')
chroots[name] = chroot
else:
existing = chroots[name]
if fail_if_exists:
raise Exception(f'chroot {name} already exists: {existing.uuid}')
logging.debug(f"returning existing chroot {name}: {existing.uuid}")
assert isinstance(existing, chroot_class)
chroot = chroots[name]
if extra_repos is not None:
chroot.extra_repos = dict(extra_repos) # copy to new dict
if initialize:
chroot.initialize()
if activate:
chroot.activate(fail_if_active=False)
chroot.activate()
return chroot

View File

@@ -1,11 +1,15 @@
import logging
import os
import subprocess
import sys
from glob import glob
from shutil import rmtree
from typing import ClassVar
from constants import Arch
from kupferbootstrap.constants import Arch
from kupferbootstrap.exec.cmd import run_root_cmd
from kupferbootstrap.exec.file import makedir, root_makedir
from kupferbootstrap.config.state import config
from .abstract import Chroot, get_chroot
from .helpers import base_chroot_name
@@ -13,31 +17,35 @@ from .helpers import base_chroot_name
class BaseChroot(Chroot):
copy_base: bool = False
_copy_base: ClassVar[bool] = False
def create_rootfs(self, reset, pacman_conf_target, active_previously):
if reset:
logging.info(f'Resetting {self.name}')
for dir in glob(os.path.join(self.path, '*')):
rmtree(dir)
makedir(config.get_path('chroots'))
root_makedir(self.get_path())
self.write_pacman_conf()
self.mount_pacman_cache()
logging.info(f'Pacstrapping chroot {self.name}: {", ".join(self.base_packages)}')
result = subprocess.run([
'pacstrap',
'-C',
pacman_conf_target,
'-c',
'-G',
self.path,
] + self.base_packages + [
'--needed',
'--overwrite=*',
'-yyuu',
])
result = run_root_cmd(
[
'pacstrap',
'-C',
pacman_conf_target,
'-G',
self.path,
*self.base_packages,
'--needed',
'--overwrite=*',
'-yyuu',
],
stderr=sys.stdout,
)
if result.returncode != 0:
raise Exception(f'Failed to initialize chroot "{self.name}"')
self.initialized = True
@@ -45,7 +53,7 @@ class BaseChroot(Chroot):
def get_base_chroot(arch: Arch) -> BaseChroot:
name = base_chroot_name(arch)
default = BaseChroot(name, arch, copy_base=False, initialize=False)
chroot = get_chroot(name, initialize=False, default=default)
args = dict(arch=arch, copy_base=False)
chroot = get_chroot(name, initialize=False, chroot_class=BaseChroot, chroot_args=args)
assert isinstance(chroot, BaseChroot)
return chroot

View File

@@ -2,11 +2,13 @@ import logging
import os
import subprocess
from glob import glob
from typing import Optional
from typing import ClassVar, Optional
from config import config
from constants import Arch, GCC_HOSTSPECS, CROSSDIRECT_PKGS, CHROOT_PATHS
from distro.distro import get_kupfer_local
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import Arch, GCC_HOSTSPECS, CROSSDIRECT_PKGS, CHROOT_PATHS
from kupferbootstrap.distro.distro import get_kupfer_local
from kupferbootstrap.exec.cmd import run_root_cmd
from kupferbootstrap.exec.file import makedir, remove_file, root_makedir, root_write_file, symlink
from .abstract import Chroot, get_chroot
from .helpers import build_chroot_name
@@ -15,9 +17,11 @@ from .base import get_base_chroot
class BuildChroot(Chroot):
copy_base: bool = True
_copy_base: ClassVar[bool] = True
def create_rootfs(self, reset: bool, pacman_conf_target: str, active_previously: bool):
makedir(config.get_path('chroots'))
root_makedir(self.get_path())
if reset or not os.path.exists(self.get_path('usr/bin')):
base_chroot = get_base_chroot(self.arch)
if base_chroot == self:
@@ -29,7 +33,7 @@ class BuildChroot(Chroot):
cmd += ['--exclude', mountpoint.rstrip('/')]
cmd += [f'{base_chroot.path}/', f'{self.path}/']
logging.debug(f"running rsync: {cmd}")
result = subprocess.run(cmd)
result = run_root_cmd(cmd)
if result.returncode != 0:
raise Exception(f'Failed to copy {base_chroot.name} to {self.name}')
@@ -50,8 +54,7 @@ class BuildChroot(Chroot):
with open(self.get_path('/usr/bin/makepkg'), 'r') as file:
data = file.read()
data = data.replace('EUID == 0', 'EUID == -1')
with open(self.get_path('/usr/bin/makepkg'), 'w') as file:
file.write(data)
root_write_file(self.get_path('/usr/bin/makepkg'), data)
# configure makepkg
self.write_makepkg_conf(self.arch, cross_chroot_relative=None, cross=False)
@@ -66,7 +69,8 @@ class BuildChroot(Chroot):
"""
target_arch = self.arch
if not native_chroot:
native_chroot = get_build_chroot(config.runtime['arch'])
assert config.runtime.arch
native_chroot = get_build_chroot(config.runtime.arch)
host_arch = native_chroot.arch
hostspec = GCC_HOSTSPECS[host_arch][target_arch]
cc = f'{hostspec}-cc'
@@ -78,6 +82,7 @@ class BuildChroot(Chroot):
native_chroot.mount_pacman_cache()
native_chroot.mount_packages()
native_chroot.activate()
logging.debug(f"Installing {CROSSDIRECT_PKGS=} + {gcc=}")
results = dict(native_chroot.try_install_packages(
CROSSDIRECT_PKGS + [gcc],
refresh=True,
@@ -99,13 +104,13 @@ class BuildChroot(Chroot):
target_include_dir = os.path.join(self.path, 'include')
for target, source in {cc_path: gcc, target_lib_dir: 'lib', target_include_dir: 'usr/include'}.items():
if not os.path.exists(target):
logging.debug(f'Symlinking {source} at {target}')
os.symlink(source, target)
if not (os.path.exists(target) or os.path.islink(target)):
logging.debug(f'Symlinking {source=} at {target=}')
symlink(source, target)
ld_so = os.path.basename(glob(f"{os.path.join(native_chroot.path, 'usr', 'lib', 'ld-linux-')}*")[0])
ld_so_target = os.path.join(target_lib_dir, ld_so)
if not os.path.islink(ld_so_target):
os.symlink(os.path.join('/native', 'usr', 'lib', ld_so), ld_so_target)
symlink(os.path.join('/native', 'usr', 'lib', ld_so), ld_so_target)
else:
logging.debug(f'ld-linux.so symlink already exists, skipping for {self.name}')
@@ -113,9 +118,9 @@ class BuildChroot(Chroot):
rustc = os.path.join(native_chroot.path, 'usr/lib/crossdirect', target_arch, 'rustc')
if os.path.exists(rustc):
logging.debug('Disabling crossdirect rustc')
os.unlink(rustc)
remove_file(rustc)
os.makedirs(native_mount, exist_ok=True)
root_makedir(native_mount)
logging.debug(f'Mounting {native_chroot.name} to {native_mount}')
self.mount(native_chroot.path, 'native', fail_if_mounted=fail_if_mounted)
return native_mount
@@ -128,13 +133,39 @@ class BuildChroot(Chroot):
fail_if_mounted=fail_if_mounted,
)
def mount_ccache(self, user: str = 'kupfer', fail_if_mounted: bool = False):
mount_source = os.path.join(config.get_path('ccache'), self.arch)
mount_dest = os.path.join(f'/home/{user}' if user != 'root' else '/root', '.ccache')
uid = self.get_uid(user)
makedir(mount_source, user=uid)
return self.mount(
absolute_source=mount_source,
relative_destination=mount_dest,
fail_if_mounted=fail_if_mounted,
)
def mount_rust(self, user: str = 'kupfer', fail_if_mounted: bool = False) -> list[str]:
results = []
uid = self.get_uid(user)
mount_source_base = config.get_path('rust') # apparently arch-agnostic
for rust_dir in ['cargo', 'rustup']:
mount_source = os.path.join(mount_source_base, rust_dir)
mount_dest = os.path.join(f'/home/{user}' if user != 'root' else '/root', f'.{rust_dir}')
makedir(mount_source, user=uid)
results.append(self.mount(
absolute_source=mount_source,
relative_destination=mount_dest,
fail_if_mounted=fail_if_mounted,
))
return results
def get_build_chroot(arch: Arch, add_kupfer_repos: bool = True, **kwargs) -> BuildChroot:
name = build_chroot_name(arch)
if 'extra_repos' in kwargs:
raise Exception('extra_repos!')
repos = get_kupfer_local(arch).repos if add_kupfer_repos else {}
default = BuildChroot(name, arch, initialize=False, copy_base=True, extra_repos=repos)
chroot = get_chroot(name, **kwargs, extra_repos=repos, default=default)
args = dict(arch=arch)
chroot = get_chroot(name, **kwargs, extra_repos=repos, chroot_class=BuildChroot, chroot_args=args)
assert isinstance(chroot, BuildChroot)
return chroot

View File

@@ -0,0 +1,67 @@
import click
import logging
import os
from typing import Optional
from kupferbootstrap.config.state import config
from kupferbootstrap.wrapper import enforce_wrap
from kupferbootstrap.devices.device import get_profile_device
from .abstract import Chroot
from .base import get_base_chroot
from .build import get_build_chroot, BuildChroot
CHROOT_TYPES = ['base', 'build', 'rootfs']
@click.command('chroot')
@click.argument('type', required=False, type=click.Choice(CHROOT_TYPES), default='build')
@click.argument(
'name',
required=False,
default=None,
)
@click.pass_context
def cmd_chroot(ctx: click.Context, type: str = 'build', name: Optional[str] = None, enable_crossdirect=True):
"""Open a shell in a chroot. For rootfs NAME is a profile name, for others the architecture (e.g. aarch64)."""
if type not in CHROOT_TYPES:
raise Exception(f'Unknown chroot type: "{type}"')
if type == 'rootfs':
from ..image.image import cmd_inspect
assert isinstance(cmd_inspect, click.Command)
ctx.invoke(cmd_inspect, profile=name, shell=True)
return
enforce_wrap()
chroot: Chroot
arch = name
if not arch:
arch = get_profile_device().arch
assert arch
if type == 'base':
chroot = get_base_chroot(arch)
if not os.path.exists(chroot.get_path('/bin')):
chroot.initialize()
chroot.initialized = True
elif type == 'build':
build_chroot: BuildChroot = get_build_chroot(arch, activate=True)
chroot = build_chroot # type safety
if not os.path.exists(build_chroot.get_path('/bin')):
build_chroot.initialize()
build_chroot.initialized = True
build_chroot.mount_pkgbuilds()
build_chroot.mount_chroots()
assert arch and config.runtime.arch
if config.file.build.crossdirect and enable_crossdirect and arch != config.runtime.arch:
build_chroot.mount_crossdirect()
else:
raise Exception('Really weird bug')
chroot.mount_packages()
chroot.activate()
logging.debug(f'Starting shell in {chroot.name}:')
chroot.run_cmd('bash', attach_tty=True)

View File

@@ -1,10 +1,14 @@
import atexit
import os
from constants import Arch, BASE_PACKAGES
from distro.distro import get_kupfer_local, get_kupfer_https
from utils import check_findmnt
from typing import Optional
from typing import ClassVar, Optional, cast
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import Arch, BASE_PACKAGES
from kupferbootstrap.distro.repo import RepoInfo
from kupferbootstrap.distro.distro import get_kupfer_local, get_kupfer_https
from kupferbootstrap.exec.file import get_temp_dir, makedir, root_makedir
from kupferbootstrap.utils import check_findmnt
from .base import BaseChroot
from .build import BuildChroot
@@ -13,14 +17,19 @@ from .abstract import get_chroot
class DeviceChroot(BuildChroot):
copy_base: bool = False
_copy_base: ClassVar[bool] = False
def create_rootfs(self, reset, pacman_conf_target, active_previously):
clss = BuildChroot if self.copy_base else BaseChroot
makedir(config.get_path('chroots'))
root_makedir(self.get_path())
if not self.copy_base:
pacman_conf_target = os.path.join(get_temp_dir(register_cleanup=True), f'pacman-{self.name}.conf')
self.write_pacman_conf(in_chroot=False, absolute_path=pacman_conf_target)
BaseChroot.create_rootfs(cast(BaseChroot, self), reset, pacman_conf_target, active_previously)
else:
BuildChroot.create_rootfs(self, reset, pacman_conf_target, active_previously)
clss.create_rootfs(self, reset, pacman_conf_target, active_previously)
def mount_rootfs(self, source_path: str, fs_type: str = None, options: list[str] = [], allow_overlay: bool = False):
def mount_rootfs(self, source_path: str, fs_type: Optional[str] = None, options: list[str] = [], allow_overlay: bool = False):
if self.active:
raise Exception(f'{self.name}: Chroot is marked as active, not mounting a rootfs over it.')
if not os.path.exists(source_path):
@@ -36,7 +45,7 @@ class DeviceChroot(BuildChroot):
raise Exception(f'{self.name}: There is already something mounted at {self.path}, not mounting over it.')
if os.path.exists(os.path.join(self.path, 'usr/bin')):
raise Exception(f'{self.name}: {self.path}/usr/bin exists, not mounting over existing rootfs.')
os.makedirs(self.path, exist_ok=True)
makedir(self.path)
atexit.register(self.deactivate)
self.mount(source_path, '/', fs_type=fs_type, options=options)
@@ -47,14 +56,15 @@ def get_device_chroot(
arch: Arch,
packages: list[str] = BASE_PACKAGES,
use_local_repos: bool = True,
extra_repos: Optional[dict] = None,
extra_repos: Optional[dict[str, RepoInfo]] = None,
**kwargs,
) -> DeviceChroot:
name = f'rootfs_{device}-{flavour}'
repos = dict(get_kupfer_local(arch).repos if use_local_repos else get_kupfer_https(arch).repos)
repos: dict[str, RepoInfo] = get_kupfer_local(arch).repos if use_local_repos else get_kupfer_https(arch).repos # type: ignore
repos.update(extra_repos or {})
default = DeviceChroot(name, arch, initialize=False, copy_base=False, base_packages=packages, extra_repos=repos)
chroot = get_chroot(name, **kwargs, extra_repos=repos, default=default)
args = dict(arch=arch, base_packages=packages, extra_repos=repos)
chroot = get_chroot(name, **kwargs, extra_repos=repos, chroot_class=DeviceChroot, chroot_args=args)
assert isinstance(chroot, DeviceChroot)
return chroot

View File

@@ -1,8 +1,8 @@
import os
from typing import Optional, TypedDict
from config import config
from constants import Arch
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import Arch
BIND_BUILD_DIRS = 'BINDBUILDDIRS'
BASE_CHROOT_PREFIX = 'base_'
@@ -61,7 +61,7 @@ def make_abs_path(path: str) -> str:
return '/' + path.lstrip('/')
def get_chroot_path(chroot_name, override_basepath: str = None) -> str:
def get_chroot_path(chroot_name, override_basepath: Optional[str] = None) -> str:
base_path = config.get_path('chroots') if not override_basepath else override_basepath
return os.path.join(base_path, chroot_name)

View File

@@ -0,0 +1,387 @@
import click
import logging
from copy import deepcopy
from typing import Any, Callable, Iterable, Mapping, Optional, Union
from kupferbootstrap.devices.device import get_devices, sanitize_device_name
from kupferbootstrap.flavours.flavour import get_flavours
from kupferbootstrap.utils import color_bold, colors_supported, color_mark_selected
from kupferbootstrap.wrapper import execute_without_exit
from .scheme import Profile
from .profile import PROFILE_EMPTY, PROFILE_DEFAULTS, resolve_profile_attr, SparseProfile
from .state import config, CONFIG_DEFAULTS, CONFIG_SECTIONS, merge_configs
def list_to_comma_str(str_list: Optional[list[str]], default='') -> str:
if str_list is None:
return default
return ','.join(str_list)
def comma_str_to_list(s: str, default: Optional[list[str]] = None) -> Optional[list[str]]:
if not s:
return default
return [a for a in s.split(',') if a]
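# Round-trip behaviour of the two helpers (assumed, matching the code above):
#   list_to_comma_str(['a', 'b'])      # -> 'a,b'
#   list_to_comma_str(None)            # -> '' (the default)
#   comma_str_to_list('a,b,,c')        # -> ['a', 'b', 'c'] (empty items dropped)
#   comma_str_to_list('', default=[])  # -> []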
def prompt_config(
text: str,
default: Any,
field_type: Union[type, click.Choice] = str,
bold: bool = True,
echo_changes: bool = True,
show_choices: bool = False,
) -> tuple[Any, bool]:
"""
prompts for a new value for a config key. returns the result and a boolean that indicates
whether the result is different, considering empty strings and None equal to each other.
"""
original_default = default
def true_or_zero(to_check) -> bool:
"""returns true if the value is truthy or int(0)"""
zero = 0 # compiler complains about 'is with literal' otherwise
return to_check or to_check is zero # can't do == due to boolean<->int casting
if type(None) == field_type:
field_type = str
if field_type == dict:
raise Exception('Dictionaries not supported by config_prompt, this is likely a bug in kupferbootstrap')
elif field_type == list:
default = list_to_comma_str(default)
value_conv = comma_str_to_list
else:
value_conv = None
default = '' if default is None else default
if bold:
text = click.style(text, bold=True)
result = click.prompt(
text,
type=field_type, # type: ignore
default=default,
value_proc=value_conv,
show_default=True,
show_choices=show_choices,
) # type: ignore
changed = result != (original_default if field_type == list else default) and (true_or_zero(default) or true_or_zero(result))
if changed and echo_changes:
print(f'value changed: "{text}" = "{result}"')
return result, changed
def prompt_profile(
name: str,
create: bool = True,
defaults: Union[Profile, dict] = {},
no_parse: bool = True,
) -> tuple[Profile, bool]:
"""Prompts the user for every field in `defaults`. Set values to None for an empty profile."""
PARSEABLE_FIELDS = ['device', 'flavour']
profile: Any = PROFILE_EMPTY | defaults
if name == 'current':
raise Exception("profile name 'current' not allowed")
# don't use get_profile() here because we need the sparse profile
if name in config.file.profiles:
logging.debug(f"Merging with existing profile config for {name}")
profile |= config.file.profiles[name]
elif create:
logging.info(f"Profile {name} doesn't exist yet, creating new profile.")
else:
raise Exception(f'Unknown profile "{name}"')
logging.info(f'Configuring profile "{name}"')
changed = False
for key, current in profile.items():
current = profile[key]
text = f'profiles.{name}.{key}'
if not no_parse and key in PARSEABLE_FIELDS:
parse_prompt = None
sanitize_func = None
if key == 'device':
parse_prompt = prompt_profile_device
sanitize_func = sanitize_device_name
elif key == 'flavour':
parse_prompt = prompt_profile_flavour
else:
raise Exception(f'config: Unhandled parseable field {key}, this is a bug in kupferbootstrap.')
result, _changed = parse_prompt(
current=current,
profile_name=name,
sparse_profiles=config.file.profiles,
use_colors=config.runtime.colors,
sanitize_func=sanitize_func,
) # type: ignore
else:
result, _changed = prompt_config(text=text, default=current, field_type=type(PROFILE_DEFAULTS[key])) # type: ignore
if _changed:
profile[key] = result
changed = True
return profile, changed
def prompt_choice(current: Optional[Any], key: str, choices: Iterable[Any], allow_none: bool = True, show_choices: bool = False) -> tuple[Any, bool]:
choices = list(choices) + ([''] if allow_none else [])
res, _ = prompt_config(text=key, default=current, field_type=click.Choice(choices), show_choices=show_choices)
if allow_none and res == '':
res = None
return res, res != current
def resolve_profile_field(current: Any, *kargs):
try:
return resolve_profile_attr(*kargs)
except KeyError as err:
logging.debug(err)
return current, None
def prompt_wrappable(
attr_name: str,
native_cmd: Callable,
cli_cmd: list[str],
current: Optional[str],
profile_name: str,
sparse_profiles: Mapping[str, SparseProfile],
sanitize_func: Optional[Callable[[str], str]] = None,
use_colors: Optional[bool] = None,
) -> tuple[str, bool]:
use_colors = colors_supported(use_colors)
print(color_bold(f"Pick your {attr_name}!\nThese are the available choices:", use_colors=use_colors))
items = execute_without_exit(native_cmd, cli_cmd)
if items is None:
logging.warning("(wrapper mode, input for this field will not be checked for correctness)")
return prompt_config(text=f'profiles.{profile_name}.{attr_name}', default=current)
selected, inherited_from = resolve_profile_field(current, profile_name, attr_name, sparse_profiles)
if selected and sanitize_func:
selected = sanitize_func(selected)
for key in sorted(items.keys()):
text = items[key].nice_str(newlines=True, colors=use_colors)
if key == selected:
text = color_mark_selected(text, profile_name, inherited_from)
print(text + '\n')
return prompt_choice(current, f'profiles.{profile_name}.{attr_name}', items.keys())
def prompt_profile_device(*kargs, **kwargs) -> tuple[str, bool]:
return prompt_wrappable('device', get_devices, ['devices'], *kargs, **kwargs)
def prompt_profile_flavour(*kargs, **kwargs) -> tuple[str, bool]:
return prompt_wrappable('flavour', get_flavours, ['flavours'], *kargs, **kwargs)
def config_dot_name_get(name: str, config: dict[str, Any], prefix: str = '') -> Any:
if not isinstance(config, dict):
raise Exception(f"Couldn't resolve config name: passed config is not a dict: {repr(config)}")
split_name = name.split('.')
name = split_name[0]
if name not in config:
raise Exception(f"Couldn't resolve config name: key {prefix + name} not found")
value = config[name]
if len(split_name) == 1:
return value
else:
rest_name = '.'.join(split_name[1:])
return config_dot_name_get(name=rest_name, config=value, prefix=prefix + name + '.')
def config_dot_name_set(name: str, value: Any, config: dict[str, Any]):
split_name = name.split('.')
if len(split_name) > 1:
config = config_dot_name_get('.'.join(split_name[:-1]), config)
config[split_name[-1]] = value
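# Minimal sketch of the dotted-key helpers (hypothetical config dict):
#   conf = {'build': {'clean_mode': True}}
#   config_dot_name_get('build.clean_mode', conf)         # -> True
#   config_dot_name_set('build.clean_mode', False, conf)  # mutates the nested dict
#   conf['build']['clean_mode']                           # -> False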
def prompt_for_save(retry_ctx: Optional[click.Context] = None):
"""
Prompt whether to save the config file. If no is answered, `False` is returned.
If `retry_ctx` is passed, the context's command will be re-executed with the same arguments if the user chooses to retry.
False will still be returned, as the retry is expected to either save, perform another retry, or abort.
"""
from ..wrapper import is_wrapped
if click.confirm(f'Do you want to save your changes to {config.runtime.config_file}?', default=True):
if is_wrapped():
logging.warning("Writing to config file inside wrapper."
"This is pointless and probably a bug."
"Your host config file will not be modified.")
return True
if retry_ctx:
if click.confirm('Retry? ("n" to quit without saving)', default=True):
retry_ctx.forward(retry_ctx.command)
return False
config_option = click.option(
'-C',
'--config',
'config_file',
help='Override path to config file',
)
@click.group(name='config')
def cmd_config():
"""Manage the configuration and -profiles"""
noninteractive_flag = click.option('-N', '--non-interactive', is_flag=True)
noop_flag = click.option('--noop', '-n', help="Don't write changes to file", is_flag=True)
noparse_flag = click.option('--no-parse', help="Don't search PKGBUILDs for devices and flavours", is_flag=True)
CONFIG_MSG = ("Leave fields empty to leave them at their currently displayed value.")
@cmd_config.command(name='init')
@noninteractive_flag
@noop_flag
@noparse_flag
@click.option(
'--sections',
'-s',
multiple=True,
type=click.Choice(CONFIG_SECTIONS),
default=CONFIG_SECTIONS,
show_choices=True,
)
@click.pass_context
def cmd_config_init(
ctx,
sections: list[str] = CONFIG_SECTIONS,
non_interactive: bool = False,
noop: bool = False,
no_parse: bool = False,
):
"""Initialize the config file"""
if not non_interactive:
logging.info(CONFIG_MSG)
results: dict[str, dict] = {}
for section in sections:
if section not in CONFIG_SECTIONS:
raise Exception(f'Unknown section: {section}')
if section == 'profiles':
continue
results[section] = {}
for key, current in config.file[section].items():
text = f'{section}.{key}'
result, changed = prompt_config(text=text, default=current, field_type=type(CONFIG_DEFAULTS[section][key]))
if changed:
results[section][key] = result
config.update(results)
print("Main configuration complete")
if not noop:
if prompt_for_save(ctx):
config.write()
else:
return
if 'profiles' in sections:
print("Configuring profiles")
current_profile = 'default' if 'current' not in config.file.profiles else config.file.profiles.current
new_current, _ = prompt_config('profiles.current', default=current_profile, field_type=str)
profile, changed = prompt_profile(new_current, create=True, no_parse=no_parse)
config.update_profile(new_current, profile)
if not noop:
if not prompt_for_save(ctx):
return
if not noop:
config.write()
else:
logging.info(f'--noop passed, not writing to {config.runtime.config_file}!')
@cmd_config.command(name='set')
@noninteractive_flag
@noop_flag
@noparse_flag
@click.argument('key_vals', nargs=-1)
@click.pass_context
def cmd_config_set(ctx, key_vals: list[str], non_interactive: bool = False, noop: bool = False, no_parse: bool = False):
"""
Set config entries. Pass entries as `key=value` pairs, with keys as dot-separated identifiers,
like `build.clean_mode=false` or alternatively just keys to get prompted if run interactively.
"""
config.enforce_config_loaded()
logging.info(CONFIG_MSG)
config_copy = deepcopy(config.file)
for pair in key_vals:
split_pair = pair.split('=')
if len(split_pair) == 2:
key: str = split_pair[0]
value: Any = split_pair[1]
value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
if value_type != list:
value = click.types.convert_type(value_type)(value)
else:
value = comma_str_to_list(value, default=[])
elif len(split_pair) == 1 and not non_interactive:
key = split_pair[0]
value_type = type(config_dot_name_get(key, CONFIG_DEFAULTS))
current = config_dot_name_get(key, config.file)
value, _ = prompt_config(text=key, default=current, field_type=value_type, echo_changes=False)
else:
raise Exception(f'Invalid key=value pair "{pair}"')
print('%s = %s' % (key, value))
config_dot_name_set(key, value, config_copy)
if merge_configs(config_copy, warn_missing_defaultprofile=False) != config_copy:
raise Exception(f'Config "{key}" = "{value}" failed to evaluate')
if not noop:
if not non_interactive and not prompt_for_save(ctx):
return
config.update(config_copy)
config.write()
@cmd_config.command(name='get')
@click.argument('keys', nargs=-1)
def cmd_config_get(keys: list[str]):
"""Get config entries.
Get entries for keys passed as dot-separated identifiers, like `build.clean_mode`"""
if len(keys) == 1:
print(config_dot_name_get(keys[0], config.file))
return
for key in keys:
print('%s = %s' % (key, config_dot_name_get(key, config.file)))
@cmd_config.group(name='profile')
def cmd_profile():
"""Manage config profiles"""
@cmd_profile.command(name='init')
@noninteractive_flag
@noop_flag
@noparse_flag
@click.argument('name', required=False)
@click.pass_context
def cmd_profile_init(ctx, name: Optional[str] = None, non_interactive: bool = False, noop: bool = False, no_parse: bool = False):
"""Create or edit a profile"""
profile = deepcopy(PROFILE_EMPTY)
if name == 'current':
raise Exception("profile name 'current' not allowed")
logging.info(CONFIG_MSG)
name = name or config.file.profiles.current
if name in config.file.profiles:
profile |= config.file.profiles[name]
if not non_interactive:
profile, _changed = prompt_profile(name, create=True, no_parse=no_parse)
config.update_profile(name, profile)
if not noop:
if not prompt_for_save(ctx):
logging.info("Not saving.")
return
config.write()
else:
logging.info(f'--noop passed, not writing to {config.runtime.config_file}!')

View File

@@ -0,0 +1,128 @@
import logging
from copy import deepcopy
from typing import Optional
from .scheme import Profile, SparseProfile
PROFILE_DEFAULTS_DICT = {
'parent': '',
'device': '',
'flavour': '',
'pkgs_include': [],
'pkgs_exclude': [],
'hostname': 'kupfer',
'username': 'kupfer',
'password': None,
'size_extra_mb': "0",
}
PROFILE_DEFAULTS = Profile.fromDict(PROFILE_DEFAULTS_DICT)
PROFILE_EMPTY: Profile = {key: None for key in PROFILE_DEFAULTS.keys()} # type: ignore
class ProfileNotFoundException(Exception):
pass
def resolve_profile(
name: str,
sparse_profiles: dict[str, SparseProfile],
resolved: Optional[dict[str, Profile]] = None,
_visited=None,
) -> dict[str, Profile]:
"""
Recursively resolves the specified profile by `name` and its parents to merge the config semantically,
applying include and exclude overrides along the hierarchy.
If `resolved` is passed `None`, a fresh dictionary will be created.
`resolved` will be modified in-place during parsing and also returned.
A sanitized `sparse_profiles` dict is assumed, no checking for unknown keys or incorrect data types is performed.
`_visited` should not be passed by users.
"""
if _visited is None:
_visited = list[str]()
if resolved is None:
resolved = dict[str, Profile]()
if name in _visited:
loop = list(_visited)
raise Exception(f'Dependency loop detected in profiles: {" -> ".join(loop+[loop[0]])}')
if name in resolved:
return resolved
logging.debug(f'Resolving profile {name}')
_visited.append(name)
sparse = sparse_profiles[name].copy()
full = deepcopy(sparse)
if name != 'default' and 'parent' not in sparse:
sparse['parent'] = 'default'
if 'parent' in sparse and (parent_name := sparse['parent']):
parent = resolve_profile(name=parent_name, sparse_profiles=sparse_profiles, resolved=resolved, _visited=_visited)[parent_name]
full = parent | sparse
# add up size_extra_mb
if 'size_extra_mb' in sparse:
size = sparse['size_extra_mb']
if isinstance(size, str) and size.startswith('+'):
full['size_extra_mb'] = int(parent.get('size_extra_mb', 0)) + int(size.lstrip('+'))
else:
full['size_extra_mb'] = int(sparse['size_extra_mb'])
# join our includes with parent's
includes = set(parent.get('pkgs_include', []) + sparse.get('pkgs_include', []))
if 'pkgs_exclude' in sparse:
includes -= set(sparse['pkgs_exclude'])
full['pkgs_include'] = list(includes)
# join our excludes with parent's
excludes = set(parent.get('pkgs_exclude', []) + sparse.get('pkgs_exclude', []))
# our includes override parent excludes
if 'pkgs_include' in sparse:
excludes -= set(sparse['pkgs_include'])
full['pkgs_exclude'] = list(excludes)
# now init missing keys
for key, value in PROFILE_DEFAULTS_DICT.items():
if key not in full.keys():
full[key] = value # type: ignore[literal-required]
if type(value) == list:
full[key] = [] # type: ignore[literal-required]
full['size_extra_mb'] = int(full['size_extra_mb'] or 0)
resolved[name] = Profile.fromDict(full)
return resolved
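# Sketch of the merge semantics (hypothetical sparse profiles; assumes
# SparseProfile.fromDict() accepts plain dicts as elsewhere in this module):
#   sparse = {
#       'default': SparseProfile.fromDict({'pkgs_include': ['vim'], 'size_extra_mb': 200}),
#       'phone': SparseProfile.fromDict({'parent': 'default', 'size_extra_mb': '+300',
#                                        'pkgs_exclude': ['vim']}),
#   }
#   phone = resolve_profile('phone', sparse)['phone']
#   # phone['size_extra_mb'] == 500 ('+300' is added to the parent's 200)
#   # 'vim' stays in pkgs_exclude and is dropped from the inherited pkgs_include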
def resolve_profile_attr(
profile_name: str,
attr_name: str,
profiles_sparse: dict[str, SparseProfile],
) -> tuple[str, str]:
"""
This function tries to resolve a profile attribute recursively,
and throws KeyError if the key is not found anywhere in the hierarchy.
Throws a ProfileNotFoundException if the profile is not in profiles_sparse
"""
if profile_name not in profiles_sparse:
raise ProfileNotFoundException(f"Unknown profile {profile_name}")
profile: Profile = profiles_sparse[profile_name]
if attr_name in profile:
return profile[attr_name], profile_name
if 'parent' not in profile:
raise KeyError(f'Profile attribute {attr_name} not found in {profile_name} and no parents')
parent = profile
parent_name = profile_name
seen = []
while True:
if attr_name in parent:
return parent[attr_name], parent_name
seen.append(parent_name)
if not parent.get('parent', None):
raise KeyError(f'Profile attribute {attr_name} not found in inheritance chain, '
f'we went down to {parent_name}.')
parent_name = parent['parent']
if parent_name in seen:
raise RecursionError(f"Profile recursion loop: profile {profile_name} couldn't be resolved"
f"because of a dependency loop:\n{' -> '.join([*seen, parent_name])}")
parent = profiles_sparse[parent_name]
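# e.g. (hypothetical sparse profiles, plain dicts for brevity):
#   profiles = {'default': {'device': 'sdm845-oneplus-enchilada'},
#               'tablet': {'parent': 'default'}}
#   resolve_profile_attr('tablet', 'device', profiles)
#   # -> ('sdm845-oneplus-enchilada', 'default'): the value plus the profile it came from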

View File

@@ -0,0 +1,152 @@
from __future__ import annotations
from munch import Munch
from typing import Any, Optional, Mapping, Union
from kupferbootstrap.dictscheme import DictScheme
from kupferbootstrap.constants import Arch
class SparseProfile(DictScheme):
parent: Optional[str]
device: Optional[str]
flavour: Optional[str]
pkgs_include: Optional[list[str]]
pkgs_exclude: Optional[list[str]]
hostname: Optional[str]
username: Optional[str]
password: Optional[str]
size_extra_mb: Optional[Union[str, int]]
def __repr__(self):
return f'{type(self)}{dict.__repr__(self.toDict())}'
class Profile(SparseProfile):
parent: Optional[str]
device: str
flavour: str
pkgs_include: list[str]
pkgs_exclude: list[str]
hostname: str
username: str
password: Optional[str]
size_extra_mb: Union[str, int]
class WrapperSection(DictScheme):
type: str # NOTE: rename to 'wrapper_type' if this causes problems
class BuildSection(DictScheme):
ccache: bool
clean_mode: bool
crosscompile: bool
crossdirect: bool
threads: int
class PkgbuildsSection(DictScheme):
git_repo: str
git_branch: str
class PacmanSection(DictScheme):
parallel_downloads: int
check_space: bool
repo_branch: str
class PathsSection(DictScheme):
cache_dir: str
chroots: str
pacman: str
packages: str
pkgbuilds: str
jumpdrive: str
images: str
ccache: str
rust: str
class ProfilesSection(DictScheme):
current: str
default: SparseProfile
@classmethod
def transform(cls, values: Mapping[str, Any], validate: bool = True, allow_extra: bool = True, type_hints: Optional[dict[str, Any]] = None):
results = {}
for k, v in values.items():
if k == 'current':
results[k] = v
continue
if not allow_extra and k != 'default':
raise Exception(f'Unknown key {k} in profiles section (Hint: extra_keys not allowed for some reason)')
if not isinstance(v, dict):
raise Exception(f'profile {v} is not a dict!')
results[k] = SparseProfile.fromDict(v, validate=True)
return results
def update(self, d, validate: bool = True):
Munch.update(self, self.transform(values=d, validate=validate))
def __repr__(self):
return f'{type(self)}{dict.__repr__(self.toDict())}'
class Config(DictScheme):
wrapper: WrapperSection
build: BuildSection
pkgbuilds: PkgbuildsSection
pacman: PacmanSection
paths: PathsSection
profiles: ProfilesSection
@classmethod
def fromDict(
cls,
values: Mapping[str, Any],
validate: bool = True,
allow_extra: bool = False,
allow_incomplete: bool = False,
):
values = dict(values) # copy for later modification
_vals = {}
for name, _class in cls._type_hints.items():
if name not in values:
if not allow_incomplete:
raise Exception(f'Config key "{name}" not in input dictionary')
continue
value = values.pop(name)
if not isinstance(value, _class):
value = _class.fromDict(value, validate=validate)
_vals[name] = value
if values:
if validate:
raise Exception(f'values contained unknown keys: {list(values.keys())}')
_vals |= values
return Config(_vals, validate=validate)
class RuntimeConfiguration(DictScheme):
verbose: bool
no_wrap: bool
error_shell: bool
config_file: Optional[str]
script_source_dir: Optional[str]
arch: Optional[Arch]
uid: Optional[int]
progress_bars: Optional[bool]
colors: Optional[bool]
class ConfigLoadState(DictScheme):
load_finished: bool
exception: Optional[Exception]
def __init__(self, d: dict = {}):
self.load_finished = False
self.exception = None
self.update(d)

View File

@@ -0,0 +1,323 @@
import appdirs
import logging
import os
import toml
from copy import deepcopy
from typing import Mapping, Optional
from kupferbootstrap.constants import DEFAULT_PACKAGE_BRANCH
from .scheme import Config, ConfigLoadState, DictScheme, Profile, RuntimeConfiguration
from .profile import PROFILE_DEFAULTS, PROFILE_DEFAULTS_DICT, resolve_profile
CONFIG_DIR = appdirs.user_config_dir('kupfer')
CACHE_DIR = appdirs.user_cache_dir('kupfer')
CONFIG_DEFAULT_PATH = os.path.join(CONFIG_DIR, 'kupferbootstrap.toml')
CONFIG_DEFAULTS_DICT = {
'wrapper': {
'type': 'docker',
},
'build': {
'ccache': True,
'clean_mode': True,
'crosscompile': True,
'crossdirect': True,
'threads': 0,
},
'pkgbuilds': {
'git_repo': 'https://gitlab.com/kupfer/packages/pkgbuilds.git',
'git_branch': DEFAULT_PACKAGE_BRANCH,
},
'pacman': {
'parallel_downloads': 4,
'check_space': False, # TODO: investigate why True causes issues
'repo_branch': DEFAULT_PACKAGE_BRANCH,
},
'paths': {
'cache_dir': CACHE_DIR,
'chroots': os.path.join('%cache_dir%', 'chroots'),
'pacman': os.path.join('%cache_dir%', 'pacman'),
'packages': os.path.join('%cache_dir%', 'packages'),
'pkgbuilds': os.path.join('%cache_dir%', 'pkgbuilds'),
'jumpdrive': os.path.join('%cache_dir%', 'jumpdrive'),
'images': os.path.join('%cache_dir%', 'images'),
'ccache': os.path.join('%cache_dir%', 'ccache'),
'rust': os.path.join('%cache_dir%', 'rust'),
},
'profiles': {
'current': 'default',
'default': deepcopy(PROFILE_DEFAULTS_DICT),
},
}
CONFIG_DEFAULTS: Config = Config.fromDict(CONFIG_DEFAULTS_DICT)
CONFIG_SECTIONS = list(CONFIG_DEFAULTS.keys())
CONFIG_RUNTIME_DEFAULTS: RuntimeConfiguration = RuntimeConfiguration.fromDict({
'verbose': False,
'no_wrap': False,
'error_shell': False,
'config_file': None,
'script_source_dir': None,
'arch': None,
'uid': None,
'progress_bars': None,
'colors': None,
})
def resolve_path_template(path_template: str, paths: dict[str, str]) -> str:
terminator = '%' # i'll be back
result = path_template
for path_name, path in paths.items():
result = result.replace(terminator + path_name + terminator, path)
return result
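# Expansion sketch matching the path templates below (hypothetical cache path):
#   resolve_path_template('%cache_dir%/chroots', {'cache_dir': '/home/user/.cache/kupfer'})
#   # -> '/home/user/.cache/kupfer/chroots'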
def sanitize_config(conf: dict[str, dict], warn_missing_defaultprofile=True) -> dict[str, dict]:
"""checks the input config dict for unknown keys and returns only the known parts"""
return merge_configs(conf_new=conf, conf_base={}, warn_missing_defaultprofile=warn_missing_defaultprofile)
def merge_configs(conf_new: Mapping[str, dict], conf_base={}, warn_missing_defaultprofile=True) -> dict[str, dict]:
"""
Returns `conf_new` semantically merged into `conf_base`, after validating
`conf_new` keys against `CONFIG_DEFAULTS` and `PROFILE_DEFAULTS`.
Pass `conf_base={}` to get a sanitized version of `conf_new`.
NOTE: `conf_base` is NOT checked for invalid keys. Sanitize beforehand.
"""
parsed = deepcopy(dict(conf_base))
for outer_name, outer_conf in deepcopy(conf_new).items():
# only handle known config sections
if outer_name not in CONFIG_SECTIONS:
logging.warning(f'Skipped unknown config section "{outer_name}"')
continue
logging.debug(f'Parsing config section "{outer_name}"')
# check if outer_conf is a dict
if not (isinstance(outer_conf, (dict, DictScheme))):
parsed[outer_name] = outer_conf
else:
# init section
if outer_name not in parsed:
parsed[outer_name] = {}
# profiles need special handling:
# 1. profile names are unknown keys by definition, but we want 'default' to exist
# 2. A profile's subkeys must be compared against PROFILE_DEFAULTS.keys()
if outer_name == 'profiles':
if warn_missing_defaultprofile and 'default' not in outer_conf.keys():
logging.warning('Default profile is not defined in config file')
update = dict[str, dict]()
for profile_name, profile_conf in outer_conf.items():
if not isinstance(profile_conf, (dict, Profile)):
if profile_name == 'current':
parsed[outer_name][profile_name] = profile_conf
else:
logging.warning(f'Skipped key "{profile_name}" in profile section: only subsections and "current" allowed')
continue
# init profile
if profile_name in parsed[outer_name]:
profile = parsed[outer_name][profile_name]
else:
profile = {}
for key, val in profile_conf.items():
if key not in PROFILE_DEFAULTS:
logging.warning(f'Skipped unknown config item "{key}" in profile "{profile_name}"')
continue
profile[key] = val
update |= {profile_name: profile}
parsed[outer_name].update(update)
else:
# handle generic inner config dict
for inner_name, inner_conf in outer_conf.items():
if inner_name not in CONFIG_DEFAULTS[outer_name].keys():
logging.warning(f'Skipped unknown config item "{inner_name}" in section "{outer_name}"')
continue
parsed[outer_name][inner_name] = inner_conf
return parsed
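# Behaviour sketch (hypothetical fragment): unknown sections and keys are warned
# about and skipped, known ones are merged into a deep copy of conf_base:
#   merged = merge_configs({'build': {'threads': 8}, 'bogus': {'x': 1}},
#                          conf_base=CONFIG_DEFAULTS_DICT)
#   # merged['build']['threads'] == 8; 'bogus' is skipped with a warning;
#   # CONFIG_DEFAULTS_DICT itself is left untouched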
def dump_toml(conf) -> str:
return toml.dumps(conf)
def dump_file(file_path: str, config: dict, file_mode: int = 0o600):
def _opener(path, flags):
return os.open(path, flags, file_mode)
conf_dir = os.path.dirname(file_path)
if not os.path.exists(conf_dir):
os.makedirs(conf_dir)
old_umask = os.umask(0)
with open(file_path, 'w', opener=_opener) as f:
f.write(dump_toml(conf=config))
os.umask(old_umask)
def parse_file(config_file: str, base: dict = CONFIG_DEFAULTS) -> dict:
"""
Parse the toml contents of `config_file`, validating keys against `CONFIG_DEFAULTS`.
The parsed results are semantically merged into `base` before returning.
`base` itself is NOT checked for invalid keys.
"""
_conf_file = config_file if config_file is not None else CONFIG_DEFAULT_PATH
logging.debug(f'Trying to load config file: {_conf_file}')
loaded_conf = toml.load(_conf_file)
return merge_configs(conf_new=loaded_conf, conf_base=base)
class ConfigLoadException(Exception):
inner = None
def __init__(self, extra_msg='', inner_exception: Optional[Exception] = None):
msg: list[str] = ['Config load failed!']
if extra_msg:
msg.append(extra_msg)
if inner_exception:
self.inner = inner_exception
msg.append(str(inner_exception))
super().__init__(self, ' '.join(msg))
class ConfigStateHolder:
# config options that are persisted to file
file: Config
# runtime config not persisted anywhere
runtime: RuntimeConfiguration
file_state: ConfigLoadState
_profile_cache: Optional[dict[str, Profile]]
def __init__(self, file_conf_path: Optional[str] = None, runtime_conf={}, file_conf_base: dict = {}):
"""init a stateholder, optionally loading `file_conf_path`"""
self.file = Config.fromDict(merge_configs(conf_new=file_conf_base, conf_base=CONFIG_DEFAULTS))
self.file_state = ConfigLoadState()
self.runtime = RuntimeConfiguration.fromDict(CONFIG_RUNTIME_DEFAULTS | runtime_conf)
self.runtime.arch = os.uname().machine
self.runtime.script_source_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
self.runtime.uid = os.getuid()
self._profile_cache = {}
if file_conf_path:
self.try_load_file(file_conf_path)
def try_load_file(self, config_file=None, base=CONFIG_DEFAULTS_DICT):
config_file = config_file or CONFIG_DEFAULT_PATH
self.runtime.config_file = config_file
self._profile_cache = None
try:
self.file = Config.fromDict(parse_file(config_file=config_file, base=base), validate=True)
self.file_state.exception = None
except Exception as ex:
self.file_state.exception = ex
self.file_state.load_finished = True
def is_loaded(self) -> bool:
"returns True if a file was **sucessfully** loaded"
return self.file_state.load_finished and self.file_state.exception is None
def enforce_config_loaded(self):
if not self.file_state.load_finished:
m = "Config file wasn't even parsed yet. This is probably a bug in kupferbootstrap :O"
raise ConfigLoadException(Exception(m))
ex = self.file_state.exception
if ex:
if type(ex) == FileNotFoundError:
ex = Exception("Config file doesn't exist. Try running `kupferbootstrap config init` first?")
raise ex
def get_profile(self, name: Optional[str] = None) -> Profile:
name = name or self.file.profiles.current
self._profile_cache = resolve_profile(name=name, sparse_profiles=self.file.profiles, resolved=self._profile_cache)
return self._profile_cache[name]
def _enforce_profile_field(self, field: str, profile_name: Optional[str] = None, hint_or_set_arch: bool = False) -> Profile:
# TODO: device
profile_name = profile_name if profile_name is not None else self.file.profiles.current
arch_hint = ''
if not hint_or_set_arch:
self.enforce_config_loaded()
else:
arch_hint = (' or specify the target architecture by passing `--arch` to the current command,\n'
'e.g. `kupferbootstrap packages build --arch x86_64`')
if not self.is_loaded():
if not self.file_state.exception:
raise Exception(f'Error enforcing config profile {field}: config hadn\'t even been loaded yet.\n'
'This is a bug in kupferbootstrap!')
raise Exception(f"Profile {field} couldn't be resolved because the config file couldn't be loaded.\n"
"If the config doesn't exist, try running `kupferbootstrap config init`.\n"
f"Error: {self.file_state.exception}")
if profile_name and profile_name not in self.file.profiles:
raise Exception(f'Unknown profile "{profile_name}". Please run `kupferbootstrap config profile init`{arch_hint}')
profile = self.get_profile(profile_name)
if field not in profile or not profile[field]:
m = (f'Profile "{profile_name}" has no {field.upper()} configured.\n'
f'Please run `kupferbootstrap config profile init {profile_name}`{arch_hint}')
raise Exception(m)
return profile
def enforce_profile_device_set(self, **kwargs) -> Profile:
return self._enforce_profile_field(field='device', **kwargs)
def enforce_profile_flavour_set(self, **kwargs) -> Profile:
return self._enforce_profile_field(field='flavour', **kwargs)
def get_path(self, path_name: str) -> str:
paths = self.file.paths
return resolve_path_template(paths[path_name], paths)
def get_package_dir(self, arch: str):
return os.path.join(self.get_path('packages'), arch)
def dump(self) -> str:
"""dump toml representation of `self.file`"""
return dump_toml(self.file)
def write(self, path=None):
"""write toml representation of `self.file` to `path`"""
if path is None:
path = self.runtime.config_file
assert path
os.makedirs(os.path.dirname(path), exist_ok=True)
new = not os.path.exists(path)
dump_file(path, self.file)
logging.info(f'{"Created" if new else "Written changes to"} config file at {path}')
def invalidate_profile_cache(self):
"""Clear the profile cache (usually after modification)"""
self._profile_cache = None
def update(self, config_fragment: dict[str, dict], warn_missing_defaultprofile: bool = True) -> bool:
"""Update `self.file` with `config_fragment`. Returns `True` if the config was changed"""
merged = merge_configs(config_fragment, conf_base=self.file, warn_missing_defaultprofile=warn_missing_defaultprofile)
changed = self.file.toDict() != merged
self.file.update(merged)
if changed and 'profiles' in config_fragment and self.file.profiles.toDict() != config_fragment['profiles']:
self.invalidate_profile_cache()
return changed
def update_profile(self, name: str, profile: Profile, merge: bool = False, create: bool = True, prune: bool = True):
new = {}
if name not in self.file.profiles:
if not create:
raise Exception(f'Unknown profile: {name}')
else:
if merge:
new = deepcopy(self.file.profiles[name])
logging.debug(f'new: {new}')
logging.debug(f'profile: {profile}')
new |= profile
if prune:
new = {key: val for key, val in new.items() if val is not None}
self.file.profiles[name] = new
self.invalidate_profile_cache()
config: ConfigStateHolder = ConfigStateHolder(file_conf_base=CONFIG_DEFAULTS)

View File

@@ -0,0 +1,225 @@
import pytest
import os
import pickle
import toml
from tempfile import mktemp, gettempdir as get_system_tempdir
from typing import Any, Optional
from kupferbootstrap.config.profile import PROFILE_DEFAULTS
from kupferbootstrap.config.scheme import Config, Profile
from kupferbootstrap.config.state import CONFIG_DEFAULTS, ConfigStateHolder
def get_filename():
return mktemp() + '_pytest.toml'
@pytest.fixture
def conf_filename():
f = get_filename()
yield f
@pytest.fixture
def empty_config():
f = get_filename()
with open(f, 'w') as fd:
fd.write('')
yield f
os.unlink(f)
@pytest.fixture
def configstate_nonexistant(conf_filename):
return ConfigStateHolder(conf_filename)
@pytest.fixture
def configstate_emptyfile(empty_config):
return ConfigStateHolder(empty_config)
def validate_ConfigStateHolder(c: ConfigStateHolder, should_load: Optional[bool] = None):
assert isinstance(c, ConfigStateHolder)
if should_load is not None:
assert c.file_state.load_finished is True
assert c.is_loaded() == should_load
assert c.file
@pytest.mark.parametrize('conf_fixture,exists', [('configstate_emptyfile', True), ('configstate_nonexistant', False)])
def test_fixture_configstate(conf_fixture: str, exists: bool, request):
configstate = request.getfixturevalue(conf_fixture)
assert 'config_file' in configstate.runtime
confpath = configstate.runtime.config_file
assert isinstance(confpath, str)
assert confpath
assert exists == os.path.exists(confpath)
assert confpath.startswith(get_system_tempdir())
def test_config_load_emptyfile(configstate_emptyfile):
validate_ConfigStateHolder(configstate_emptyfile, should_load=True)
def test_config_load_nonexistant(configstate_nonexistant):
validate_ConfigStateHolder(configstate_nonexistant, should_load=False)
@pytest.mark.parametrize('path_fixture,should_load', [('conf_filename', False), ('empty_config', True)])
def test_loadstate_is_loaded(path_fixture: str, should_load: bool, request: pytest.FixtureRequest):
path = request.getfixturevalue(path_fixture)
assert os.path.exists(path) == should_load
c = ConfigStateHolder(path)
validate_ConfigStateHolder(c, should_load)
assert c.file_state.load_finished is True
assert (c.file_state.exception is None) == should_load
assert c.is_loaded() == should_load
@pytest.mark.parametrize('conf_fixture', ['configstate_emptyfile', 'configstate_nonexistant'])
def test_config_fills_defaults(conf_fixture: str, request):
c = request.getfixturevalue(conf_fixture)
assert c.file == CONFIG_DEFAULTS
def dict_filter_out_None(d: dict):
return {k: v for k, v in d.items() if v is not None}
def compare_to_defaults(config: dict, defaults: dict = CONFIG_DEFAULTS, filter_None_from_defaults: Optional[bool] = None):
if filter_None_from_defaults is None:
filter_None_from_defaults = not isinstance(config, Config)
# assert sections match
assert config.keys() == defaults.keys()
for section, section_defaults in defaults.items():
assert section in config
assert isinstance(section_defaults, dict)
# Filter out None values from defaults - they're not written unless set
if filter_None_from_defaults:
section_defaults = dict_filter_out_None(section_defaults)
section_values_config = config[section]
if section != 'profiles':
assert section_values_config == section_defaults
else:
CURRENT_KEY = 'current'
assert CURRENT_KEY in section_defaults.keys()
assert section_defaults.keys() == section_values_config.keys()
assert section_defaults[CURRENT_KEY] == section_values_config[CURRENT_KEY]
for profile_name, profile in section_defaults.items():
if profile_name == CURRENT_KEY:
continue # not a profile
if filter_None_from_defaults:
profile = dict_filter_out_None(profile)
assert profile == section_values_config[profile_name]
def load_toml_file(path) -> dict:
with open(path, 'r') as f:
text = f.read()
assert text
return toml.loads(text)
def get_path_from_stateholder(c: ConfigStateHolder):
return c.runtime.config_file
def test_config_save_nonexistant(configstate_nonexistant: ConfigStateHolder):
c = configstate_nonexistant
confpath = c.runtime.config_file
assert confpath
assert not os.path.exists(confpath)
c.write()
assert confpath
assert os.path.exists(confpath)
loaded = load_toml_file(confpath)
assert loaded
# sadly we can't just assert `loaded == CONFIG_DEFAULTS` due to `None` values
compare_to_defaults(loaded)
def test_config_save_modified(configstate_emptyfile: ConfigStateHolder):
c = configstate_emptyfile
WRAPPER_KEY = 'wrapper'
TYPE_KEY = 'type'
assert WRAPPER_KEY in c.file
assert TYPE_KEY in c.file[WRAPPER_KEY]
wrapper_section = CONFIG_DEFAULTS[WRAPPER_KEY] | {TYPE_KEY: 'none'}
c.file[WRAPPER_KEY] |= wrapper_section
c.write()
defaults_modified = CONFIG_DEFAULTS | {WRAPPER_KEY: wrapper_section}
compare_to_defaults(load_toml_file(get_path_from_stateholder(c)), defaults_modified)
def get_config_scheme(data: dict[str, Any], validate=True, allow_incomplete=False) -> Config:
"""
helper func to ignore a false type error.
for some reason, mypy argues about DictScheme.fromDict() instead of Config.fromDict() here
"""
return Config.fromDict(data, validate=validate, allow_incomplete=allow_incomplete) # type: ignore[call-arg]
def test_config_scheme_defaults():
c = get_config_scheme(CONFIG_DEFAULTS, validate=True, allow_incomplete=False)
assert c
compare_to_defaults(c)
def test_config_scheme_modified():
modifications = {'wrapper': {'type': 'none'}, 'build': {'crossdirect': False}}
assert set(modifications.keys()).issubset(CONFIG_DEFAULTS.keys())
d = {section_name: (section | modifications.get(section_name, {})) for section_name, section in CONFIG_DEFAULTS.items()}
c = get_config_scheme(d, validate=True, allow_incomplete=False)
assert c
assert c.build.crossdirect is False
assert c.wrapper.type == 'none'
def test_configstate_profile_pickle():
c = ConfigStateHolder()
assert c.file.wrapper
assert c.file.profiles
# add new profile to check it doesn't error out due to unknown keys
c.file.profiles['graphical'] = {'username': 'kupfer123', 'hostname': 'test123'}
p = pickle.dumps(c)
unpickled = pickle.loads(p)
assert c.file == unpickled.file
def test_profile():
p = None
p = Profile.fromDict(PROFILE_DEFAULTS)
assert p is not None
assert isinstance(p, Profile)
def test_get_profile():
c = ConfigStateHolder()
d = {'username': 'kupfer123', 'hostname': 'test123'}
c.file.profiles['testprofile'] = d
p = c.get_profile('testprofile')
assert p
assert isinstance(p, Profile)
def test_get_profile_from_disk(configstate_emptyfile):
profile_name = 'testprofile'
device = 'sdm845-oneplus-enchilada'
c = configstate_emptyfile
c.file.profiles.default.device = device
d = {'parent': 'default', 'username': 'kupfer123', 'hostname': 'test123'}
c.file.profiles[profile_name] = d
filepath = c.runtime.config_file
assert filepath
c.write()
del c
c = ConfigStateHolder(filepath)
c.try_load_file(filepath)
c.enforce_config_loaded()
p: Profile = c.get_profile(profile_name)
assert isinstance(p, Profile)
assert 'device' in p
assert p.device == device

View File

@@ -0,0 +1,174 @@
from .typehelpers import TypeAlias
FASTBOOT = 'fastboot'
FLASH_PARTS = {
'FULL': 'full',
'ABOOT': 'abootimg',
'LK2ND': 'lk2nd',
'QHYPSTUB': 'qhypstub',
}
EMMC = 'emmc'
MICROSD = 'microsd'
LOCATIONS = [EMMC, MICROSD]
JUMPDRIVE = 'jumpdrive'
JUMPDRIVE_VERSION = '0.8'
BASE_LOCAL_PACKAGES: list[str] = [
'base-kupfer',
]
BASE_PACKAGES: list[str] = BASE_LOCAL_PACKAGES + [
'base',
'nano',
'vim',
]
POST_INSTALL_CMDS = [
'kupfer-config apply',
'kupfer-config --user apply',
]
REPOS_CONFIG_FILE = "repos.yml"
REPOSITORIES = [
'boot',
'cross',
'device',
'firmware',
'linux',
'main',
'phosh',
]
DEFAULT_PACKAGE_BRANCH = 'dev'
KUPFER_BRANCH_MARKER = '%kupfer_branch%'
KUPFER_HTTPS_BASE = f'https://gitlab.com/kupfer/packages/prebuilts/-/raw/{KUPFER_BRANCH_MARKER}'
KUPFER_HTTPS = KUPFER_HTTPS_BASE + '/$arch/$repo'
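# Example expansion (assuming %kupfer_branch% is substituted with the configured
# branch elsewhere and pacman fills in $arch/$repo):
#   branch 'dev', arch 'aarch64', repo 'main' ->
#   https://gitlab.com/kupfer/packages/prebuilts/-/raw/dev/aarch64/main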
Arch: TypeAlias = str
ARCHES = [
'x86_64',
'aarch64',
'armv7h',
]
DistroArch: TypeAlias = Arch
TargetArch: TypeAlias = Arch
ALARM_REPOS = {
'core': 'http://mirror.archlinuxarm.org/$arch/$repo',
'extra': 'http://mirror.archlinuxarm.org/$arch/$repo',
'community': 'http://mirror.archlinuxarm.org/$arch/$repo',
'alarm': 'http://mirror.archlinuxarm.org/$arch/$repo',
'aur': 'http://mirror.archlinuxarm.org/$arch/$repo',
}
BASE_DISTROS: dict[DistroArch, dict[str, dict[str, str]]] = {
'x86_64': {
'repos': {
'core': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch',
'extra': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch',
'community': 'https://geo.mirror.pkgbuild.com/$repo/os/$arch',
},
},
'aarch64': {
'repos': ALARM_REPOS,
},
'armv7h': {
'repos': ALARM_REPOS,
},
}
COMPILE_ARCHES: dict[Arch, str] = {
'x86_64': 'amd64',
'aarch64': 'arm64',
'armv7h': 'arm',
}
GCC_HOSTSPECS: dict[DistroArch, dict[TargetArch, str]] = {
'x86_64': {
'x86_64': 'x86_64-pc-linux-gnu',
'aarch64': 'aarch64-unknown-linux-gnu',
'armv7h': 'arm-unknown-linux-gnueabihf'
},
'aarch64': {
'aarch64': 'aarch64-unknown-linux-gnu',
},
'armv7h': {
'armv7h': 'armv7l-unknown-linux-gnueabihf'
},
}
CFLAGS_GENERAL = ['-O2', '-pipe', '-fstack-protector-strong']
CFLAGS_ALARM = [
'-fno-plt',
'-fexceptions',
'-Wp,-D_FORTIFY_SOURCE=2',
'-Wformat',
'-Werror=format-security',
'-fstack-clash-protection',
]
CFLAGS_ARCHES: dict[Arch, list[str]] = {
'x86_64': ['-march=x86-64', '-mtune=generic'],
'aarch64': [
'-march=armv8-a',
] + CFLAGS_ALARM,
'armv7h': [
'-march=armv7-a',
'-mfloat-abi=hard',
'-mfpu=neon',
] + CFLAGS_ALARM,
}
QEMU_ARCHES: dict[Arch, str] = {
'x86_64': 'x86_64',
'aarch64': 'aarch64',
'armv7h': 'arm',
}
QEMU_BINFMT_PKGS = ['qemu-user-static-bin', 'binfmt-qemu-static']
CROSSDIRECT_PKGS = ['crossdirect'] + QEMU_BINFMT_PKGS
SSH_DEFAULT_HOST = '172.16.42.1'
SSH_DEFAULT_PORT = 22
SSH_COMMON_OPTIONS = [
'-o',
'GlobalKnownHostsFile=/dev/null',
'-o',
'UserKnownHostsFile=/dev/null',
'-o',
'StrictHostKeyChecking=no',
]
CHROOT_PATHS = {
'chroots': '/chroots',
'jumpdrive': '/var/cache/jumpdrive',
'pacman': '/pacman',
'packages': '/packages',
'pkgbuilds': '/pkgbuilds',
'images': '/images',
}
WRAPPER_TYPES = [
'none',
'docker',
]
WRAPPER_ENV_VAR = 'KUPFERBOOTSTRAP_WRAPPED'
MAKEPKG_CMD = [
'makepkg',
'--noconfirm',
'--ignorearch',
'--needed',
]
SRCINFO_FILE = '.SRCINFO'
SRCINFO_METADATA_FILE = '.srcinfo_meta.json'
SRCINFO_INITIALISED_FILE = ".srcinfo_initialised.json"
SRCINFO_TARBALL_FILE = "srcinfos.tar.gz"
SRCINFO_TARBALL_URL = f'{KUPFER_HTTPS_BASE}/{SRCINFO_TARBALL_FILE}'
FLAVOUR_INFO_FILE = 'flavourinfo.json'
FLAVOUR_DESCRIPTION_PREFIX = 'kupfer flavour:'
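# Hedged sketch (illustrative, not part of the module): how the URL templates
# above compose. kupferbootstrap substitutes the branch marker itself, while
# $arch and $repo are left for pacman to expand; shown with plain str.replace():
if __name__ == '__main__':
    url = KUPFER_HTTPS.replace(KUPFER_BRANCH_MARKER, DEFAULT_PACKAGE_BRANCH)
    for placeholder, value in {'$arch': 'aarch64', '$repo': 'main'}.items():
        url = url.replace(placeholder, value)
    print(url)  # -> https://gitlab.com/kupfer/packages/prebuilts/-/raw/dev/aarch64/main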

View File


@@ -0,0 +1,80 @@
import click
import logging
from json import dumps as json_dump
from typing import Optional
from kupferbootstrap.config.state import config
from kupferbootstrap.config.cli import resolve_profile_field
from kupferbootstrap.utils import color_mark_selected, colors_supported
from .device import get_devices, get_device
@click.command(name='devices')
@click.option('-j', '--json', is_flag=True, help='output machine-parsable JSON format')
@click.option(
'--force-parse-deviceinfo/--no-parse-deviceinfo',
is_flag=True,
default=None,
help="Force or disable deviceinfo parsing. The default is to try but continue if it fails.",
)
@click.option(
'--download-packages/--no-download-packages',
is_flag=True,
default=False,
help='Download packages while trying to parse deviceinfo',
)
@click.option('--output-file', type=click.Path(exists=False, file_okay=True), help="Dump JSON to file")
def cmd_devices(
json: bool = False,
force_parse_deviceinfo: Optional[bool] = None,
download_packages: bool = False,
output_file: Optional[str] = None,
):
'list the available devices and descriptions'
devices = get_devices()
if not devices:
raise Exception("No devices found!")
profile_device = None
profile_name = config.file.profiles.current
selected, inherited_from = None, None
try:
selected, inherited_from = resolve_profile_field(None, profile_name, 'device', config.file.profiles)
if selected:
profile_device = get_device(selected)
except Exception as ex:
logging.debug(f"Failed to get profile device for marking as currently selected, continuing anyway. Exception: {ex}")
output = ['']
json_output = {}
interactive_json = json and not output_file
if output_file:
json = True
use_colors = colors_supported(False if interactive_json else config.runtime.colors)
for name in sorted(devices.keys()):
device = devices[name]
assert device
if force_parse_deviceinfo in [None, True]:
try:
device.parse_deviceinfo(try_download=download_packages)
except Exception as ex:
if not force_parse_deviceinfo:
logging.debug(f"Failed to parse deviceinfo for extended description, not a problem: {ex}")
else:
raise ex
if json:
json_output[name] = device.get_summary().toDict()
if interactive_json:
continue
snippet = device.nice_str(colors=use_colors, newlines=True)
if profile_device and profile_device.name == device.name:
snippet = color_mark_selected(snippet, profile_name or '[unknown]', inherited_from)
output.append(f"{snippet}\n")
if interactive_json:
output = ['\n' + json_dump(json_output, indent=4)]
if output_file:
with open(output_file, 'w') as fd:
fd.write(json_dump(json_output))
for line in output:
print(line)
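# Hedged usage sketch (assumes the 'kupferbootstrap' entrypoint is installed);
# flag names are taken from the click options above:
#   kupferbootstrap devices
#   kupferbootstrap devices --json
#   kupferbootstrap devices --json --output-file devices.json --no-parse-deviceinfo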

View File

@@ -0,0 +1,209 @@
import logging
import os
from typing import Optional
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import Arch, ARCHES
from kupferbootstrap.dictscheme import DictScheme
from kupferbootstrap.distro.distro import get_kupfer_local
from kupferbootstrap.distro.package import LocalPackage
from kupferbootstrap.packages.pkgbuild import Pkgbuild, _pkgbuilds_cache, discover_pkgbuilds, get_pkgbuild_by_path, init_pkgbuilds
from kupferbootstrap.utils import read_files_from_tar, color_str
from .deviceinfo import DEFAULT_IMAGE_SECTOR_SIZE, DeviceInfo, parse_deviceinfo
DEVICE_DEPRECATIONS = {
"oneplus-enchilada": "sdm845-oneplus-enchilada",
"oneplus-fajita": "sdm845-oneplus-fajita",
"xiaomi-beryllium-ebbg": "sdm845-xiaomi-beryllium-ebbg",
"xiaomi-beryllium-tianma": "sdm845-xiaomi-beryllium-tianma",
"bq-paella": "msm8916-bq-paella",
}
class DeviceSummary(DictScheme):
name: str
description: str
arch: str
package_name: Optional[str]
package_path: Optional[str]
def nice_str(self, newlines: bool = False, colors: bool = False) -> str:
separator = '\n' if newlines else ', '
assert bool(self.package_path) == bool(self.package_name)
package_path = {"Package Path": self.package_path} if self.package_path else {}
fields = {
"Device": self.name,
"Description": self.description or f"[no package {'description' if self.package_name else 'associated (?!)'} and deviceinfo not parsed]",
"Architecture": self.arch,
"Package Name": self.package_name or "no package associated. PROBABLY A BUG!",
**package_path,
}
return separator.join([f"{color_str(name, bold=True, use_colors=colors)}: {value}" for name, value in fields.items()])
class Device(DictScheme):
name: str
arch: Arch
package: Pkgbuild
deviceinfo: Optional[DeviceInfo]
def __repr__(self):
return f'Device<{self.name},{self.arch},{self.package.path if self.package else "[no package]"}>'
def __str__(self):
return self.nice_str(newlines=True)
def nice_str(self, *args, **kwargs) -> str:
return self.get_summary().nice_str(*args, **kwargs)
def get_summary(self) -> DeviceSummary:
result: dict[str, Optional[str]] = {}
description = ((self.package.description if self.package else "").strip() or
(self.deviceinfo.get("name", "[No name in deviceinfo]") if self.deviceinfo else "")).strip()
result["name"] = self.name
result["description"] = description
result["arch"] = self.arch
result["package_name"] = self.package.name if self.package else None
result["package_path"] = self.package.path if self.package else None
return DeviceSummary(result)
def parse_deviceinfo(self, try_download: bool = True, lazy: bool = True) -> DeviceInfo:
if not lazy or 'deviceinfo' not in self or self.deviceinfo is None:
# avoid import loop
from kupferbootstrap.packages.build import check_package_version_built
is_built = check_package_version_built(self.package, self.arch, try_download=try_download)
if not is_built:
raise Exception(f"device package {self.package.name} for device {self.name} couldn't be acquired!")
pkgs: dict[str, LocalPackage] = get_kupfer_local(arch=self.arch, in_chroot=False, scan=True).get_packages()
if self.package.name not in pkgs:
raise Exception(f"device package {self.package.name} somehow not in repos, this is a kupferbootstrap bug")
pkg = pkgs[self.package.name]
file_path = pkg.acquire()
assert file_path
assert os.path.exists(file_path)
deviceinfo_path = 'etc/kupfer/deviceinfo'
for path, f in read_files_from_tar(file_path, [deviceinfo_path]):
if path != deviceinfo_path:
raise Exception(f'Somehow, we got a wrong file: expected: "{deviceinfo_path}", got: "{path}"')
with f as fd:
lines = fd.readlines()
assert lines
if lines and isinstance(lines[0], bytes):
lines = [line.decode() for line in lines]
info = parse_deviceinfo(lines, self.name)
assert info.arch
assert info.arch == self.arch
self['deviceinfo'] = info
assert self.deviceinfo
return self.deviceinfo
def get_image_sectorsize(self, **kwargs) -> Optional[int]:
"""Gets the deviceinfo_rootfs_image_sector_size if defined, otherwise None"""
return self.parse_deviceinfo(**kwargs).get('rootfs_image_sector_size', None)
def get_image_sectorsize_default(self, **kwargs) -> int:
return self.get_image_sectorsize(**kwargs) or DEFAULT_IMAGE_SECTOR_SIZE
def check_devicepkg_name(name: str, log_level: Optional[int] = None):
valid = True
if not name.startswith('device-'):
valid = False
if log_level is not None:
logging.log(log_level, f'invalid device package name "{name}": doesn\'t start with "device-"')
if name.endswith('-common'):
valid = False
if log_level is not None:
logging.log(log_level, f'invalid device package name "{name}": ends with "-common"')
return valid
def parse_device_pkg(pkgbuild: Pkgbuild) -> Device:
if len(pkgbuild.arches) != 1:
raise Exception(f"{pkgbuild.name}: Device package must have exactly one arch, but has {pkgbuild.arches}")
arch = pkgbuild.arches[0]
if arch == 'any' or arch not in ARCHES:
raise Exception(f'unknown arch for device package: {arch}')
if pkgbuild.repo != 'device':
logging.warning(f'device package {pkgbuild.name} is in unexpected repo "{pkgbuild.repo}", expected "device"')
name = pkgbuild.name
prefix = 'device-'
if name.startswith(prefix):
name = name[len(prefix):]
return Device(name=name, arch=arch, package=pkgbuild, deviceinfo=None)
def sanitize_device_name(name: str, warn: bool = True) -> str:
if name not in DEVICE_DEPRECATIONS:
return name
warning = f"Deprecated device {name}"
replacement = DEVICE_DEPRECATIONS[name]
if replacement:
warning += (f': Device has been renamed to {replacement}! Please adjust your profile config!\n'
'This will become an error in a future version!')
name = replacement
if warn:
logging.warning(warning)
return name
_device_cache: dict[str, Device] = {}
_device_cache_populated: bool = False
def get_devices(pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True) -> dict[str, Device]:
global _device_cache, _device_cache_populated
use_cache = _device_cache_populated and lazy
if not use_cache:
logging.info("Searching PKGBUILDs for device packages")
if not pkgbuilds:
pkgbuilds = discover_pkgbuilds(lazy=lazy, repositories=['device'])
_device_cache.clear()
for pkgbuild in pkgbuilds.values():
if not (pkgbuild.repo == 'device' and check_devicepkg_name(pkgbuild.name, log_level=None)):
continue
dev = parse_device_pkg(pkgbuild)
_device_cache[dev.name] = dev
_device_cache_populated = True
return _device_cache.copy()
def get_device(name: str, pkgbuilds: Optional[dict[str, Pkgbuild]] = None, lazy: bool = True, scan_all=False) -> Device:
global _device_cache, _device_cache_populated
assert lazy or pkgbuilds
name = sanitize_device_name(name)
if lazy and name in _device_cache:
return _device_cache[name]
if scan_all:
devices = get_devices(pkgbuilds=pkgbuilds, lazy=lazy)
if name not in devices:
raise Exception(f'Unknown device {name}!\n'
f'Available: {list(devices.keys())}')
return devices[name]
else:
pkgname = f'device-{name}'
if pkgbuilds:
if pkgname not in pkgbuilds:
raise Exception(f'Unknown device {name}!')
pkgbuild = pkgbuilds[pkgname]
else:
if lazy and pkgname in _pkgbuilds_cache:
pkgbuild = _pkgbuilds_cache[pkgname]
else:
init_pkgbuilds()
relative_path = os.path.join('device', pkgname)
if not os.path.exists(os.path.join(config.get_path('pkgbuilds'), relative_path)):
logging.debug(f'Exact device pkgbuild path "pkgbuilds/{relative_path}" doesn\'t exist, scanning entire repo')
return get_device(name, pkgbuilds=pkgbuilds, lazy=lazy, scan_all=True)
pkgbuild = [p for p in get_pkgbuild_by_path(relative_path, lazy=lazy) if p.name == pkgname][0]
device = parse_device_pkg(pkgbuild)
if lazy:
_device_cache[name] = device
return device
def get_profile_device(profile_name: Optional[str] = None, hint_or_set_arch: bool = False):
profile = config.enforce_profile_device_set(profile_name=profile_name, hint_or_set_arch=hint_or_set_arch)
return get_device(profile.device)
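# Hedged usage sketch (assumes an initialised pkgbuilds tree with the device
# repo available); guarded so it only runs when executed directly:
if __name__ == '__main__':
    dev = get_device('sdm845-oneplus-enchilada')
    print(dev.nice_str(newlines=True))
    # deprecated names are redirected (with a warning) by sanitize_device_name():
    assert get_device('oneplus-enchilada').name == dev.name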

View File

@@ -0,0 +1,270 @@
# Copyright 2022 Oliver Smith
# SPDX-License-Identifier: GPL-3.0-or-later
# Taken from postmarketOS/pmbootstrap, modified for kupferbootstrap by Prawn
import copy
import logging
import os
from typing import Mapping, Optional
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import Arch
from kupferbootstrap.dictscheme import DictScheme
PMOS_ARCHES_OVERRIDES: dict[str, Arch] = {
"armv7": 'armv7h',
}
DEFAULT_IMAGE_SECTOR_SIZE = 512
class DeviceInfo(DictScheme):
arch: Arch
name: str
manufacturer: str
codename: str
chassis: str
flash_pagesize: int
flash_method: str
rootfs_image_sector_size: Optional[int]
@classmethod
def transform(cls, values: Mapping[str, Optional[str]], **kwargs):
kwargs = {'allow_extra': True} | kwargs
return super().transform(values, **kwargs)
# Variables from deviceinfo. Reference: <https://postmarketos.org/deviceinfo>
deviceinfo_attributes = [
# general
"format_version",
"name",
"manufacturer",
"codename",
"year",
"dtb",
"modules_initfs",
"arch",
# device
"chassis",
"keyboard",
"external_storage",
"screen_width",
"screen_height",
"dev_touchscreen",
"dev_touchscreen_calibration",
"append_dtb",
# bootloader
"flash_method",
"boot_filesystem",
# flash
"flash_heimdall_partition_kernel",
"flash_heimdall_partition_initfs",
"flash_heimdall_partition_system",
"flash_heimdall_partition_vbmeta",
"flash_heimdall_partition_dtbo",
"flash_fastboot_partition_kernel",
"flash_fastboot_partition_system",
"flash_fastboot_partition_vbmeta",
"flash_fastboot_partition_dtbo",
"generate_legacy_uboot_initfs",
"kernel_cmdline",
"generate_bootimg",
"bootimg_qcdt",
"bootimg_mtk_mkimage",
"bootimg_dtb_second",
"flash_offset_base",
"flash_offset_kernel",
"flash_offset_ramdisk",
"flash_offset_second",
"flash_offset_tags",
"flash_pagesize",
"flash_fastboot_max_size",
"flash_sparse",
"flash_sparse_samsung_format",
"rootfs_image_sector_size",
"sd_embed_firmware",
"sd_embed_firmware_step_size",
"partition_blacklist",
"boot_part_start",
"partition_type",
"root_filesystem",
"flash_kernel_on_update",
"cgpt_kpart",
"cgpt_kpart_start",
"cgpt_kpart_size",
# weston
"weston_pixman_type",
# keymaps
"keymaps",
]
# Valid types for the 'chassis' attribute in deviceinfo
# See https://www.freedesktop.org/software/systemd/man/machine-info.html
deviceinfo_chassis_types = [
"desktop",
"laptop",
"convertible",
"server",
"tablet",
"handset",
"watch",
"embedded",
"vm",
]
def sanity_check(deviceinfo: dict[str, Optional[str]], device_name: str):
try:
_pmos_sanity_check(deviceinfo, device_name)
except RuntimeError as err:
raise Exception(f"{device_name}: The postmarketOS checker for deviceinfo files has run into an issue.\n"
"Here at kupfer, we usually don't maintain our own deviceinfo files "
"and instead often download them postmarketOS in our PKGBUILDs.\n"
"Please make sure your PKGBUILDs.git is up to date. (run `kupferbootstrap packages update`)\n"
"If the problem persists, please open an issue for this device's deviceinfo file "
"in the kupfer pkgbuilds git repo on Gitlab.\n\n"
"postmarketOS error message (referenced file may not exist until you run makepkg in that directory):\n"
f"{err}")
def _pmos_sanity_check(info: dict[str, Optional[str]], device_name: str):
# Resolve path for more readable error messages
path = os.path.join(config.get_path('pkgbuilds'), 'device', device_name, 'deviceinfo')
# Legacy errors
if "flash_methods" in info:
raise RuntimeError("deviceinfo_flash_methods has been renamed to"
" deviceinfo_flash_method. Please adjust your"
" deviceinfo file: " + path)
if "external_disk" in info or "external_disk_install" in info:
raise RuntimeError("Instead of deviceinfo_external_disk and"
" deviceinfo_external_disk_install, please use the"
" new variable deviceinfo_external_storage in your"
" deviceinfo file: " + path)
if "msm_refresher" in info:
raise RuntimeError("It is enough to specify 'msm-fb-refresher' in the"
" depends of your device's package now. Please"
" delete the deviceinfo_msm_refresher line in: " + path)
if "flash_fastboot_vendor_id" in info:
raise RuntimeError("Fastboot doesn't allow specifying the vendor ID"
" anymore (#1830). Try removing the"
" 'deviceinfo_flash_fastboot_vendor_id' line in: " + path + " (if you are sure that "
" you need this, then we can probably bring it back to fastboot, just"
" let us know in the postmarketOS issues!)")
if "nonfree" in info:
raise RuntimeError("deviceinfo_nonfree is unused. "
"Please delete it in: " + path)
if "dev_keyboard" in info:
raise RuntimeError("deviceinfo_dev_keyboard is unused. "
"Please delete it in: " + path)
if "date" in info:
raise RuntimeError("deviceinfo_date was replaced by deviceinfo_year. "
"Set it to the release year in: " + path)
# "codename" is required
codename = os.path.basename(os.path.dirname(path))
if codename.startswith("device-"):
codename = codename[7:]
# kupfer prepends the SoC
codename_alternative = codename.split('-', maxsplit=1)[1] if codename.count('-') > 1 else codename
_codename = info.get('codename', None)
if not _codename or not (_codename in [codename, codename_alternative] or codename.startswith(_codename) or
codename_alternative.startswith(_codename)):
raise RuntimeError(f"Please add 'deviceinfo_codename=\"{codename}\"' "
f"to: {path}")
# "chassis" is required
chassis_types = deviceinfo_chassis_types
if "chassis" not in info or not info["chassis"]:
logging.info("NOTE: the most commonly used chassis types in"
" postmarketOS are 'handset' (for phones) and 'tablet'.")
raise RuntimeError(f"Please add 'deviceinfo_chassis' to: {path}")
# "arch" is required
if "arch" not in info or not info["arch"]:
raise RuntimeError(f"Please add 'deviceinfo_arch' to: {path}")
# "chassis" validation
chassis_type = info["chassis"]
if chassis_type not in chassis_types:
raise RuntimeError(f"Unknown chassis type '{chassis_type}', should"
f" be one of {', '.join(chassis_types)}. Fix this"
f" and try again: {path}")
def parse_kernel_suffix(deviceinfo: dict[str, Optional[str]], kernel: str = 'mainline') -> dict[str, Optional[str]]:
"""
Remove the kernel suffix (as selected in 'pmbootstrap init') from
deviceinfo variables. Related:
https://wiki.postmarketos.org/wiki/Device_specific_package#Multiple_kernels
:param deviceinfo: deviceinfo dict, e.g.:
    {"name": "first", "dtb_mainline": "second", "dtb_downstream": "third"}
:param kernel: which kernel suffix to remove (e.g. "mainline");
    only keys listed in deviceinfo_attributes are rewritten
:returns: the dict with the configured kernel suffix removed, e.g.:
    {"name": "first", "dtb": "second", "dtb_downstream": "third"}
"""
# Do nothing if no kernel suffix is configured (e.g. after switching from a
# device with multiple kernels to a device with only one kernel)
# kernels = pmb.parse._apkbuild.kernels(args, device)
if not kernel: # or kernel not in kernels:
logging.debug(f"parse_kernel_suffix: {kernel} not set, skipping")
return deviceinfo
ret = copy.copy(deviceinfo)
suffix_kernel = kernel.replace("-", "_")
for key in deviceinfo_attributes:
key_kernel = f"{key}_{suffix_kernel}"
if key_kernel not in ret:
continue
# Move ret[key_kernel] to ret[key]
logging.debug(f"parse_kernel_suffix: {key_kernel} => {key}")
ret[key] = ret[key_kernel]
del (ret[key_kernel])
return ret
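# Worked example (a sketch, not part of the module's API): suffixed keys are
# only folded in when the base key appears in deviceinfo_attributes; unknown
# keys keep their suffix untouched:
#   parse_kernel_suffix({"dtb_mainline": "qcom/x", "foo_mainline": "y"}, "mainline")
#   -> {"dtb": "qcom/x", "foo_mainline": "y"}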
def parse_deviceinfo(deviceinfo_lines: list[str], device_name: str, kernel='mainline') -> DeviceInfo:
"""
:param deviceinfo_lines: lines of the deviceinfo file to parse
:param device_name: name of the device the deviceinfo belongs to
:param kernel: kernel suffix to strip, see parse_kernel_suffix()
"""
info: dict[str, Optional[str]] = {}
for line in deviceinfo_lines:
line = line.strip()
if line.startswith("#") or not line:
continue
if "=" not in line:
raise SyntaxError(f"{device_name}: No '=' found:\n\t{line}")
split = line.split("=", 1)
if not split[0].startswith("deviceinfo_"):
logging.warning(f"{device_name}: Unknown key {split[0]} in deviceinfo:\n{line}")
continue
key = split[0][len("deviceinfo_"):]
value = split[1].replace("\"", "").replace("\n", "")
info[key] = value
# Assign empty string as default
for key in deviceinfo_attributes:
if key not in info:
info[key] = None
info = parse_kernel_suffix(info, kernel)
sanity_check(info, device_name)
if 'arch' in info:
arch = info['arch']
info['arch'] = PMOS_ARCHES_OVERRIDES.get(arch, arch) # type: ignore[arg-type]
dev = DeviceInfo.fromDict(info)
return dev

View File

@@ -0,0 +1,100 @@
import pytest
import os
from copy import copy
from kupferbootstrap.config.state import ConfigStateHolder, config
from kupferbootstrap.packages.pkgbuild import init_pkgbuilds, discover_pkgbuilds, Pkgbuild, parse_pkgbuild
from .device import Device, DEVICE_DEPRECATIONS, get_device, get_devices, parse_device_pkg, check_devicepkg_name
@pytest.fixture(scope='session')
def initialise_pkgbuilds_dir() -> ConfigStateHolder:
config.try_load_file()
init_pkgbuilds(interactive=False)
return config
@pytest.fixture()
def pkgbuilds_dir(initialise_pkgbuilds_dir: ConfigStateHolder) -> str:
global config
config = initialise_pkgbuilds_dir
return config.get_path('pkgbuilds')
@pytest.fixture(scope='session')
def pkgbuilds_repo_cached(initialise_pkgbuilds_dir) -> dict[str, Pkgbuild]:
return discover_pkgbuilds()
@pytest.fixture()
def pkgbuilds_repo(pkgbuilds_dir, pkgbuilds_repo_cached):
# use pkgbuilds_dir to ensure global config gets overridden, can't be done from session scope fixtures
return pkgbuilds_repo_cached
ONEPLUS_ENCHILADA = 'sdm845-oneplus-enchilada'
ONEPLUS_ENCHILADA_PKG = f'device-{ONEPLUS_ENCHILADA}'
@pytest.fixture(scope='session')
def enchilada_pkgbuild(initialise_pkgbuilds_dir: ConfigStateHolder):
config = initialise_pkgbuilds_dir
config.try_load_file()
return parse_pkgbuild(os.path.join('device', ONEPLUS_ENCHILADA_PKG))[0]
def validate_oneplus_enchilada(d: Device):
assert d
assert d.arch == 'aarch64'
assert d.package and d.package.name == ONEPLUS_ENCHILADA_PKG
def test_fixture_initialise_pkgbuilds_dir(initialise_pkgbuilds_dir: ConfigStateHolder):
assert os.path.exists(os.path.join(config.get_path('pkgbuilds'), 'device'))
def test_fixture_pkgbuilds_dir(pkgbuilds_dir):
assert os.path.exists(os.path.join(pkgbuilds_dir, 'device'))
def test_get_device():
name = ONEPLUS_ENCHILADA
d = get_device(name)
validate_oneplus_enchilada(d)
def test_get_device_deprecated():
name = 'oneplus-enchilada'
assert name in DEVICE_DEPRECATIONS
d = get_device(name)
# currently redirects to correct package, need to change this test when changed to an exception
validate_oneplus_enchilada(d)
def test_parse_device_pkg_enchilada(enchilada_pkgbuild):
validate_oneplus_enchilada(parse_device_pkg(enchilada_pkgbuild))
def test_parse_device_pkg_malformed_arch(enchilada_pkgbuild):
enchilada_pkgbuild = copy(enchilada_pkgbuild)
enchilada_pkgbuild.arches.append('x86_64')
with pytest.raises(Exception):
parse_device_pkg(enchilada_pkgbuild)
def test_discover_packages_and_warm_cache_sorry_takes_long(pkgbuilds_repo):
# mostly used to warm up the cache in a user-visible way
assert pkgbuilds_repo
assert ONEPLUS_ENCHILADA_PKG in pkgbuilds_repo
def test_get_devices(pkgbuilds_repo: dict[str, Pkgbuild]):
d = get_devices(pkgbuilds_repo)
assert d
assert ONEPLUS_ENCHILADA in d
for p in d.values():
check_devicepkg_name(p.package.name)
assert 'sdm845-oneplus-common' not in d
validate_oneplus_enchilada(d[ONEPLUS_ENCHILADA])

View File

@@ -0,0 +1,87 @@
from kupferbootstrap.config.state import config
from .deviceinfo import DeviceInfo, parse_deviceinfo
from .device import get_device
deviceinfo_text = """
# Reference: <https://postmarketos.org/deviceinfo>
# Please use double quotes only. You can source this file in shell scripts.
deviceinfo_format_version="0"
deviceinfo_name="BQ Aquaris X5"
deviceinfo_manufacturer="BQ"
deviceinfo_codename="bq-paella"
deviceinfo_year="2015"
deviceinfo_dtb="qcom/msm8916-longcheer-l8910"
deviceinfo_append_dtb="true"
deviceinfo_modules_initfs="smb1360 panel-longcheer-yushun-nt35520 panel-longcheer-truly-otm1288a msm himax-hx852x"
deviceinfo_arch="aarch64"
# Device related
deviceinfo_gpu_accelerated="true"
deviceinfo_chassis="handset"
deviceinfo_keyboard="false"
deviceinfo_external_storage="true"
deviceinfo_screen_width="720"
deviceinfo_screen_height="1280"
deviceinfo_getty="ttyMSM0;115200"
# Bootloader related
deviceinfo_flash_method="fastboot"
deviceinfo_kernel_cmdline="earlycon console=ttyMSM0,115200 PMOS_NO_OUTPUT_REDIRECT"
deviceinfo_generate_bootimg="true"
deviceinfo_flash_offset_base="0x80000000"
deviceinfo_flash_offset_kernel="0x00080000"
deviceinfo_flash_offset_ramdisk="0x02000000"
deviceinfo_flash_offset_second="0x00f00000"
deviceinfo_flash_offset_tags="0x01e00000"
deviceinfo_flash_pagesize="2048"
deviceinfo_flash_sparse="true"
"""
def test_parse_deviceinfo():
config.try_load_file()
d = parse_deviceinfo(deviceinfo_text.split('\n'), 'device-bq-paella')
assert isinstance(d, DeviceInfo)
assert d
assert d.arch
assert d.chassis
assert d.flash_method
assert d.flash_pagesize
# test that fields not listed in the class definition make it into the object
assert d.dtb
assert d.gpu_accelerated
def test_parse_variant_deviceinfo():
config.try_load_file()
# {'variant1': 'AAAAA', 'variant2': 'BBBBB', 'variant3': 'CCCCC'}
variants = {f"variant{i+1}": chr(ord('A') + i) * 5 for i in range(0, 3)}
field = "dev_touchscreen_calibration"
text = deviceinfo_text + '\n'.join([""] + [f"deviceinfo_{field}_{variant}={value}" for variant, value in variants.items()])
for variant, result in variants.items():
d = parse_deviceinfo(text.split('\n'), 'device-bq-paella', kernel=variant)
# note: the python code from pmb only strips one variant, the shell code in packaging strips all variants
assert f'{field}_{variant}' not in d
assert field in d
assert d[field] == result
def test_get_deviceinfo_from_repo():
config.try_load_file()
dev = get_device('sdm845-oneplus-enchilada')
assert dev
info = dev.parse_deviceinfo()
assert info
def test_get_variant_deviceinfo_from_repo():
config.try_load_file()
dev = get_device('sdm845-xiaomi-beryllium-ebbg')
assert dev
info = dev.parse_deviceinfo()
assert info
assert 'dtb' in info # variant-specific variable, check it has been stripped down from 'dtb_ebbg' to 'dtb'
assert 'dtb_tianma' not in info
assert info.dtb

View File

@@ -0,0 +1,299 @@
from __future__ import annotations
import logging
import toml
from munch import Munch
from toml.encoder import TomlEncoder, TomlPreserveInlineDictEncoder
from typing import ClassVar, Generator, Optional, Union, Mapping, Any, get_type_hints, get_origin, get_args, Iterable
from .typehelpers import UnionType, NoneType
def resolve_type_hint(hint: type, ignore_origins: list[type] = []) -> Iterable[type]:
origin = get_origin(hint)
args: Iterable[type] = get_args(hint)
if origin in ignore_origins:
return [hint]
if origin is Optional:
args = set(list(args) + [NoneType])
if origin in [Union, UnionType, Optional]:
results: list[type] = []
for arg in args:
results += resolve_type_hint(arg, ignore_origins=ignore_origins)
return results
return [origin or hint]
def flatten_hints(hints: Any) -> Generator[Any, None, None]:
if not isinstance(hints, (list, tuple)):
yield hints
return
for i in hints:
yield from flatten_hints(i)
def resolve_dict_hints(hints: Any) -> Generator[tuple[Any, ...], None, None]:
for hint in flatten_hints(hints):
t_origin = get_origin(hint)
t_args = get_args(hint)
if t_origin == dict:
yield t_args
continue
if t_origin in [NoneType, Optional, Union, UnionType] and t_args:
yield from resolve_dict_hints(t_args)
continue
class DictScheme(Munch):
_type_hints: ClassVar[dict[str, Any]]
_strip_hidden: ClassVar[bool] = False
_sparse: ClassVar[bool] = False
def __init__(self, d: Mapping = {}, validate: bool = True, **kwargs):
self.update(dict(d) | kwargs, validate=validate)
@classmethod
def transform(
cls,
values: Mapping[str, Any],
*,
validate: bool = True,
allow_extra: bool = False,
type_hints: Optional[dict[str, Any]] = None,
) -> Any:
results: dict[str, Any] = {}
values = dict(values)
for key in list(values.keys()):
value = values.pop(key)
type_hints = cls._type_hints if type_hints is None else type_hints
if key in type_hints:
_classes = tuple[type](resolve_type_hint(type_hints[key]))
optional = bool(set([NoneType, None]).intersection(_classes))
if optional and value is None:
results[key] = None
continue
if issubclass(_classes[0], dict):
assert isinstance(value, dict) or (optional and value is None), f'{key=} is not dict: {value!r}, {_classes=}'
target_class = _classes[0]
if target_class in [None, NoneType, Optional]:
for target in _classes[1:]:
if target not in [None, NoneType, Optional]:
target_class = target
break
if target_class is dict:
dict_hints = list(resolve_dict_hints(type_hints[key]))
if len(dict_hints) != 1:
msg = f"transform(): Received wrong amount of type hints for key {key}: {len(dict_hints)}"
if validate:
raise Exception(msg)
logging.warning(msg)
if len(dict_hints) == 1 and value is not None:
if len(dict_hints[0]) != 2 or not all(dict_hints[0]):
logging.debug(f"Weird dict hints received: {dict_hints}")
continue
key_type, value_type = dict_hints[0]
if not isinstance(value, Mapping):
msg = f"Got non-mapping {value!r} for expected dict type: {key_type} => {value_type}. Allowed classes: {_classes}"
if validate:
raise Exception(msg)
logging.warning(msg)
results[key] = value
continue
if isinstance(key_type, type):
if issubclass(key_type, str):
target_class = Munch
else:
msg = f"{key=} subdict got wrong key type hint (expected str): {key_type}"
if validate:
raise Exception(msg)
logging.warning(msg)
if validate:
for k in value:
if not isinstance(k, tuple(flatten_hints(key_type))):
raise Exception(f'Subdict "{key}": wrong type for subkey "{k}": got: {type(k)}, expected: {key_type}')
dict_content_hints = {k: value_type for k in value}
value = cls.transform(value, validate=validate, allow_extra=allow_extra, type_hints=dict_content_hints)
if not isinstance(value, target_class):
if not (optional and value is None):
assert issubclass(target_class, Munch)
# despite the above assert, mypy doesn't seem to understand target_class is a Munch here
kwargs = {'validate': validate} if issubclass(target_class, DictScheme) else {}
value = target_class(value, **kwargs) # type:ignore[attr-defined]
else:
# print(f"nothing to do: '{key}' was already {target_class})
pass
# handle numerics
elif set(_classes).intersection([int, float]) and isinstance(value, str) and str not in _classes:
parsed_number = None
parsers: list[tuple[type, list]] = [(int, [10]), (int, [0]), (float, [])]
for _cls, args in parsers:
if _cls not in _classes:
continue
try:
parsed_number = _cls(value, *args)
break
except ValueError:
continue
if parsed_number is None:
if validate:
raise Exception(f"Couldn't parse string value {repr(value)} for key '{key}' into number formats: " +
(', '.join(list(c.__name__ for c in _classes))))
else:
value = parsed_number
if validate:
if not isinstance(value, _classes):
raise Exception(f'key "{key}" has value of wrong type! expected: '
f'{" ,".join([ c.__name__ for c in _classes])}; '
f'got: {type(value).__name__}; value: {value}')
elif validate and not allow_extra:
logging.debug(f"{cls}: unknown key '{key}': {value}")
raise Exception(f'{cls}: Unknown key "{key}"')
else:
if isinstance(value, dict) and not isinstance(value, Munch):
value = Munch.fromDict(value)
results[key] = value
if values:
if validate:
raise Exception(f'values contained unknown keys: {list(values.keys())}')
results |= values
return results
@classmethod
def fromDict(cls, values: Mapping[str, Any], validate: bool = True):
return cls(d=values, validate=validate)
def toDict(
self,
strip_hidden: Optional[bool] = None,
sparse: Optional[bool] = None,
):
return self.strip_dict(
self,
strip_hidden=strip_hidden,
sparse=sparse,
recursive=True,
)
@classmethod
def strip_dict(
cls,
d: dict[Any, Any],
strip_hidden: Optional[bool] = None,
sparse: Optional[bool] = None,
recursive: bool = True,
hints: Optional[dict[str, Any]] = None,
validate: bool = True,
) -> dict[Any, Any]:
# preserve original None-type args
_sparse = cls._sparse if sparse is None else sparse
_strip_hidden = cls._strip_hidden if strip_hidden is None else strip_hidden
hints = cls._type_hints if hints is None else hints
result = dict(d)
if not (_strip_hidden or _sparse or result):
return result
for k, v in d.items():
type_hint = resolve_type_hint(hints.get(k, "abc"))
if not isinstance(k, str):
msg = f"strip_dict(): unknown key type {k=}: {type(k)=}"
if validate:
raise Exception(msg)
logging.warning(f"{msg} (skipping)")
continue
if _strip_hidden and k.startswith('_'):
result.pop(k)
continue
if v is None:
if NoneType not in type_hint:
msg = f'encountered illegal null value at key "{k}" for typehint {type_hint}'
if validate:
raise Exception(msg)
logging.warning(msg)
if _sparse:
result.pop(k)
continue
if recursive and isinstance(v, dict):
if not v:
result[k] = {}
continue
if isinstance(v, DictScheme):
# pass None in sparse and strip_hidden
result[k] = v.toDict(strip_hidden=strip_hidden, sparse=sparse)
continue
if isinstance(v, Munch):
result[k] = v.toDict()
if k not in hints:
continue
_subhints = {}
_hints = resolve_type_hint(hints[k], [dict])
hints_flat = list(flatten_hints(_hints))
subclass = DictScheme
for hint in hints_flat:
if get_origin(hint) == dict:
_valtype = get_args(hint)[1]
_subhints = {n: _valtype for n in v.keys()}
break
if isinstance(hint, type) and issubclass(hint, DictScheme):
subclass = hint
_subhints = hint._type_hints
break
else:
# print(f"ignoring {hint=}")
continue
result[k] = subclass.strip_dict(
v,
hints=_subhints,
sparse=_sparse,
strip_hidden=_strip_hidden,
recursive=recursive,
)
return result
def update(self, d: Mapping[str, Any], validate: bool = True):
Munch.update(self, type(self).transform(d, validate=validate))
def __init_subclass__(cls):
super().__init_subclass__()
cls._type_hints = {name: hint for name, hint in get_type_hints(cls).items() if get_origin(hint) is not ClassVar}
def __repr__(self):
return f'{type(self)}{dict.__repr__(dict(self))}'
def toYAML(
self,
strip_hidden: Optional[bool] = None,
sparse: Optional[bool] = None,
**yaml_args,
) -> str:
import yaml
yaml_args = {'sort_keys': False} | yaml_args
dumped = yaml.dump(
self.toDict(strip_hidden=strip_hidden, sparse=sparse),
**yaml_args,
)
if dumped is None:
raise Exception(f"Failed to yaml-serialse {self}")
return dumped
def toToml(
self,
strip_hidden: Optional[bool] = None,
sparse: Optional[bool] = None,
encoder: Optional[TomlEncoder] = TomlPreserveInlineDictEncoder(),
) -> str:
return toml.dumps(
self.toDict(strip_hidden=strip_hidden, sparse=sparse),
encoder=encoder,
)
class TomlInlineDict(dict, toml.decoder.InlineTableDict):
pass
def toml_inline_dicts(value: Any) -> Any:
if not isinstance(value, Mapping):
return value
return TomlInlineDict({k: toml_inline_dicts(v) for k, v in value.items()})
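# Hedged self-test sketch (the class and field names here are illustrative, not
# kupferbootstrap API): type hints drive transform()'s validation and numeric
# coercion, and toDict() honours the _strip_hidden/_sparse class flags.
if __name__ == '__main__':
    class Example(DictScheme):
        count: int
        label: Optional[str]

    e = Example.fromDict({'count': '42', 'label': None})
    assert e.count == 42  # '42' was coerced by the numeric-parsing branch above
    assert e.toDict() == {'count': 42, 'label': None}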

View File


@@ -0,0 +1,247 @@
import logging
from enum import IntFlag
from typing import Generic, Mapping, Optional, TypeVar
from kupferbootstrap.constants import Arch, ARCHES, REPOSITORIES, KUPFER_BRANCH_MARKER, KUPFER_HTTPS, CHROOT_PATHS
from kupferbootstrap.generator import generate_pacman_conf_body
from kupferbootstrap.config.state import config
from .repo import BinaryPackageType, RepoInfo, Repo, LocalRepo, RemoteRepo
from .repo_config import AbstrRepoConfig, BaseDistro, ReposConfigFile, REPOS_CONFIG_DEFAULT, get_repo_config as _get_repo_config
class DistroLocation(IntFlag):
REMOTE = 0
LOCAL = 1
CHROOT = 3
RepoType = TypeVar('RepoType', bound=Repo)
class Distro(Generic[RepoType]):
repos: Mapping[str, RepoType]
arch: str
def __init__(self, arch: Arch, repo_infos: dict[str, RepoInfo], scan=False):
assert (arch in ARCHES)
self.arch = arch
self.repos = dict[str, RepoType]()
for repo_name, repo_info in repo_infos.items():
self.repos[repo_name] = self._create_repo(
name=repo_name,
arch=arch,
url_template=repo_info.url_template,
options=repo_info.options,
scan=scan,
)
def _create_repo(self, **kwargs) -> RepoType:
    raise NotImplementedError()
def get_packages(self) -> dict[str, BinaryPackageType]:
""" get packages from all repos, semantically overlaying them"""
results = dict[str, BinaryPackageType]()
for repo in list(self.repos.values())[::-1]:
assert repo.packages is not None
results.update(repo.packages)
return results
def repos_config_snippet(self, extra_repos: Mapping[str, RepoInfo] = {}) -> str:
extras: list[Repo] = [
Repo(name, url_template=info.url_template, arch=self.arch, options=info.options, scan=False) for name, info in extra_repos.items()
]
return '\n\n'.join(repo.config_snippet() for repo in (extras + list(self.repos.values())))
def get_pacman_conf(self, extra_repos: Mapping[str, RepoInfo] = {}, check_space: bool = True, in_chroot: bool = True):
body = generate_pacman_conf_body(self.arch, check_space=check_space)
return body + self.repos_config_snippet(extra_repos)
def scan(self, lazy=True):
for repo in self.repos.values():
if not (lazy and repo.scanned):
repo.scan()
def is_scanned(self):
for repo in self.repos.values():
if not repo.scanned:
return False
return True
class LocalDistro(Distro[LocalRepo]):
def _create_repo(self, **kwargs) -> LocalRepo:
return LocalRepo(**kwargs)
class RemoteDistro(Distro[RemoteRepo]):
def _create_repo(self, **kwargs) -> RemoteRepo:
return RemoteRepo(**kwargs)
def get_kupfer(arch: str, url_template: str, scan: bool = False) -> Distro:
repos = {name: RepoInfo(url_template=url_template, options={'SigLevel': 'Never'}) for name in REPOSITORIES}
remote = not url_template.startswith('file://')
clss = RemoteDistro if remote else LocalDistro
distro = clss(
arch=arch,
repo_infos=repos,
scan=scan,
)
assert isinstance(distro, (LocalDistro, RemoteDistro))
if remote:
assert isinstance(distro, RemoteDistro)
for repo in distro.repos.values():
repo.cache_repo_db = True
return distro
_kupfer_https: dict[Arch, RemoteDistro] = {}
_kupfer_local: dict[Arch, LocalDistro] = {}
_kupfer_local_chroots: dict[Arch, LocalDistro] = {}
def reset_distro_caches():
global _kupfer_https, _kupfer_local, _kupfer_local_chroots
for cache in _kupfer_https, _kupfer_local, _kupfer_local_chroots:
assert isinstance(cache, dict)
cache.clear()
def get_kupfer_url(url: str = KUPFER_HTTPS, branch: Optional[str] = None) -> str:
"""gets the repo URL for `branch`, getting branch from config if `None` is passed."""
branch = config.file.pacman.repo_branch if branch is None else branch
return url.replace(KUPFER_BRANCH_MARKER, branch)
def get_repo_config(*args, **kwargs) -> ReposConfigFile:
repo_config, changed = _get_repo_config(*args, **kwargs)
if changed:
logging.debug("Repo configs changed, resetting caches")
reset_distro_caches()
return repo_config
def get_kupfer_repo_names(local: bool) -> list[str]:
configs = get_repo_config()
results = []
for repo, repo_config in configs.repos.items():
if not local and repo_config.local_only:
continue
results.append(repo)
return results
def get_RepoInfo(arch: Arch, repo_config: AbstrRepoConfig, default_url: Optional[str]) -> RepoInfo:
url = repo_config.remote_url or default_url
if isinstance(url, dict):
if arch not in url and not default_url:
raise Exception(f"Invalid repo config: Architecture {arch} not in remote_url mapping: {url}")
url = url.get(arch, default_url)
assert url
return RepoInfo(
url_template=get_kupfer_url(url),
options=repo_config.get('options', None) or {},
)
def get_base_distro(arch: Arch, scan: bool = False, unsigned: bool = True, cache_db: bool = True) -> RemoteDistro:
base_distros = get_repo_config().base_distros
if base_distros is None or arch not in base_distros:
base_distros = REPOS_CONFIG_DEFAULT.base_distros
assert base_distros
distro_config: BaseDistro
distro_config = base_distros.get(arch) # type: ignore[assignment]
repos = {}
for repo, repo_config in distro_config.repos.items():
if unsigned:
repo_config['options'] = (repo_config.get('options', None) or {}) | {'SigLevel': 'Never'}
repos[repo] = get_RepoInfo(arch, repo_config, default_url=distro_config.remote_url)
distro = RemoteDistro(arch=arch, repo_infos=repos, scan=False)
if cache_db:
for r in distro.repos.values():
assert isinstance(r, RemoteRepo)
r.cache_repo_db = True
if scan:
distro.scan()
return distro
def get_kupfer_distro(
arch: Arch,
location: DistroLocation,
scan: bool = False,
cache_db: bool = True,
) -> Distro:
global _kupfer_https, _kupfer_local, _kupfer_local_chroots
cls: type[Distro]
cache: Mapping[str, Distro]
repo_config = get_repo_config()
remote = False
if location == DistroLocation.REMOTE:
remote = True
cache = _kupfer_https
default_url = repo_config.remote_url or KUPFER_HTTPS
repos = {repo: get_RepoInfo(arch, conf, default_url) for repo, conf in repo_config.repos.items() if not conf.local_only}
cls = RemoteDistro
elif location in [DistroLocation.CHROOT, DistroLocation.LOCAL]:
if location == DistroLocation.CHROOT:
cache = _kupfer_local_chroots
pkgdir = CHROOT_PATHS['packages']
else:
assert location == DistroLocation.LOCAL
cache = _kupfer_local
pkgdir = config.get_path('packages')
default_url = f"file://{pkgdir}/$arch/$repo"
cls = LocalDistro
repos = {}
for name, repo in repo_config.repos.items():
repo = repo.copy()
repo.remote_url = default_url
repos[name] = get_RepoInfo(arch, repo, default_url)
else:
raise Exception(f"Unknown distro location {location}")
if cache is None:
cache = {}
assert arch
assert isinstance(cache, dict)
if arch not in cache or not cache[arch]:
distro = cls(
arch=arch,
repo_infos=repos,
scan=False,
)
assert isinstance(distro, (LocalDistro, RemoteDistro))
cache[arch] = distro
if remote and cache_db:
assert isinstance(distro, RemoteDistro)
for r in distro.repos.values():
r.cache_repo_db = True
if scan:
distro.scan()
return distro
item: Distro = cache[arch]
if scan and not item.is_scanned():
item.scan()
return item
def get_kupfer_https(arch: Arch, scan: bool = False, cache_db: bool = True) -> RemoteDistro:
d = get_kupfer_distro(arch, location=DistroLocation.REMOTE, scan=scan, cache_db=cache_db)
assert isinstance(d, RemoteDistro)
return d
def get_kupfer_local(arch: Optional[Arch] = None, scan: bool = False, in_chroot: bool = True) -> LocalDistro:
arch = arch or config.runtime.arch
assert arch
location = DistroLocation.CHROOT if in_chroot else DistroLocation.LOCAL
d = get_kupfer_distro(arch, location=location, scan=scan)
assert isinstance(d, LocalDistro)
return d
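# Hedged usage sketch (assumes a usable kupferbootstrap config): assemble a
# pacman.conf body for the remote kupfer repos without touching the network;
# scan() is what actually downloads the repo DBs, and we skip it here.
if __name__ == '__main__':
    d = get_kupfer_https('aarch64', scan=False)
    print(d.get_pacman_conf(check_space=False))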

View File

@@ -0,0 +1,93 @@
import logging
import os
from shutil import copyfileobj
from typing import Optional, Union
from urllib.request import urlopen
from kupferbootstrap.exec.file import get_temp_dir, makedir
class PackageInfo:
name: str
version: str
class BinaryPackage(PackageInfo):
arch: str
filename: str
resolved_url: Optional[str]
_desc: Optional[dict[str, Union[str, list[str]]]]
def __init__(
self,
name: str,
version: str,
arch: str,
filename: str,
resolved_url: Optional[str] = None,
):
self.name = name
self.version = version
self.arch = arch
self.filename = filename
self.resolved_url = resolved_url
def __repr__(self):
return f'{self.name}@{self.version}'
@classmethod
def parse_desc(clss, desc_str: str, resolved_repo_url=None):
"""Parses a desc file, returning a PackageInfo"""
desc: dict[str, Union[str, list[str]]] = {}
for segment in f'\n{desc_str}'.split('\n%'):
if not segment.strip():
continue
key, elements = (e.strip() for e in segment.strip().split('%\n', 1))
elements_split = elements.split('\n')
desc[key] = elements if len(elements_split) == 1 else elements_split
validated: dict[str, str] = {}
for key in ['NAME', 'VERSION', 'ARCH', 'FILENAME']:
assert key in desc
value = desc[key]
assert isinstance(value, str)
validated[key] = value
p = clss(
name=validated['NAME'],
version=validated['VERSION'],
arch=validated['ARCH'],
filename=validated['FILENAME'],
resolved_url='/'.join([resolved_repo_url, validated['FILENAME']]) if resolved_repo_url else None,
)
p._desc = desc
return p
def acquire(self) -> str:
raise NotImplementedError()
class LocalPackage(BinaryPackage):
def acquire(self) -> str:
assert self.resolved_url and self.filename and self.filename in self.resolved_url
path = f'{self.resolved_url.split("file://")[1]}'
assert os.path.exists(path), f"local package file not found: {path}"
return path
class RemotePackage(BinaryPackage):
def acquire(self, dest_dir: Optional[str] = None) -> str:
assert self.resolved_url and '.pkg.tar.' in self.resolved_url
url = f"{self.resolved_url}"
assert url
dest_dir = dest_dir or get_temp_dir()
makedir(dest_dir)
dest_file_path = os.path.join(dest_dir, self.filename)
logging.info(f"Trying to download package {url}")
with urlopen(url) as fsrc, open(dest_file_path, 'wb') as fdst:
copyfileobj(fsrc, fdst)
logging.info(f"{self.filename} downloaded from repos")
return dest_file_path
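# Hedged sketch of the pacman repo 'desc' format consumed by parse_desc()
# (a trimmed, made-up package, not real repo data):
if __name__ == '__main__':
    desc = '\n'.join([
        '%NAME%', 'demo-pkg', '',
        '%VERSION%', '1-1', '',
        '%ARCH%', 'aarch64', '',
        '%FILENAME%', 'demo-pkg-1-1-aarch64.pkg.tar.zst',
    ])
    p = BinaryPackage.parse_desc(desc, resolved_repo_url='https://example.org/aarch64/main')
    assert repr(p) == 'demo-pkg@1-1' and p.resolved_url.endswith(p.filename)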

View File

@@ -0,0 +1,134 @@
from copy import deepcopy
import logging
import os
import tarfile
from typing import Generic, TypeVar
from kupferbootstrap.config.state import config
from kupferbootstrap.exec.file import get_temp_dir
from kupferbootstrap.utils import download_file
from .package import BinaryPackage, LocalPackage, RemotePackage
BinaryPackageType = TypeVar('BinaryPackageType', bound=BinaryPackage)
def resolve_url(url_template, repo_name: str, arch: str):
result = url_template
for template, replacement in {'$repo': repo_name, '$arch': arch}.items():
result = result.replace(template, replacement)
return result
class RepoInfo:
options: dict[str, str]
url_template: str
def __init__(self, url_template: str, options: dict[str, str] = {}):
self.url_template = url_template
self.options = {} | options
class Repo(RepoInfo, Generic[BinaryPackageType]):
name: str
resolved_url: str
arch: str
packages: dict[str, BinaryPackageType]
remote: bool
scanned: bool = False
def resolve_url(self) -> str:
return resolve_url(self.url_template, repo_name=self.name, arch=self.arch)
def scan(self, allow_failure: bool = False) -> bool:
failed = False
self.resolved_url = self.resolve_url()
self.remote = not self.resolved_url.startswith('file://')
try:
path = self.acquire_db_file()
index = tarfile.open(path)
except Exception as ex:
if not allow_failure:
raise ex
logging.error(f"Repo {self.name}, {self.arch}: Error acquiring repo DB: {ex!r}")
return False
logging.debug(f'Parsing repo file at {path}')
for node in index.getmembers():
if os.path.basename(node.name) == 'desc':
pkgname = os.path.dirname(node.name)
logging.debug(f'Parsing desc file for {pkgname}')
fd = index.extractfile(node)
assert fd
contents = fd.read().decode()
try:
pkg = self._parse_desc(contents)
except Exception as ex:
if not allow_failure:
raise ex
logging.error(f'Repo {self.name}, {self.arch}: Error parsing desc for "{pkgname}": {ex!r}')
failed = True
continue
self.packages[pkg.name] = pkg
if failed:
return False
self.scanned = True
return True
def _parse_desc(self, desc_text: str): # can't annotate the type properly :(
raise NotImplementedError()
def parse_desc(self, desc_text: str) -> BinaryPackageType:
return self._parse_desc(desc_text)
def acquire_db_file(self) -> str:
raise NotImplementedError
def __init__(self, name: str, url_template: str, arch: str, options={}, scan=False):
self.packages = {}
self.name = name
self.url_template = url_template
self.arch = arch
self.options = deepcopy(options)
if scan:
self.scan()
def __repr__(self):
return f'<Repo:{self.name}:{self.arch}:{self.url_template}>'
def config_snippet(self) -> str:
options = {'Server': self.url_template} | self.options
return ('[%s]\n' % self.name) + '\n'.join([f"{key} = {value}" for key, value in options.items()])
def get_RepoInfo(self):
return RepoInfo(url_template=self.url_template, options=self.options)
class LocalRepo(Repo[LocalPackage]):
def _parse_desc(self, desc_text: str) -> LocalPackage:
return LocalPackage.parse_desc(desc_text, resolved_repo_url=self.resolved_url)
def acquire_db_file(self) -> str:
return f'{self.resolved_url}/{self.name}.db'.split('file://')[1]
class RemoteRepo(Repo[RemotePackage]):
cache_repo_db: bool
def __init__(self, *kargs, cache_repo_db: bool = False, **kwargs):
self.cache_repo_db = cache_repo_db
super().__init__(*kargs, **kwargs)
def _parse_desc(self, desc_text: str) -> RemotePackage:
return RemotePackage.parse_desc(desc_text, resolved_repo_url=self.resolved_url)
def acquire_db_file(self) -> str:
uri = f'{self.resolved_url}/{self.name}.db'
logging.info(f'Downloading repo file from {uri}')
assert self.arch and self.name, f"repo has incomplete information: {self.name=}, {self.arch=}"
path = get_temp_dir() if not self.cache_repo_db else os.path.join(config.get_path('pacman'), 'repo_dbs', self.arch)
os.makedirs(path, exist_ok=True)
repo_file = f'{path}/{self.name}.tar.gz'
download_file(repo_file, uri, update=True)
return repo_file
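# Hedged sketch: URL template resolution and the generated pacman config
# snippet for a local repo (scan=False, so no DB file is read; the path is an
# example only):
if __name__ == '__main__':
    r = LocalRepo('main', 'file:///packages/$arch/$repo', 'aarch64', options={'SigLevel': 'Never'})
    assert r.resolve_url() == 'file:///packages/aarch64/main'
    print(r.config_snippet())  # [main] \ Server = ... \ SigLevel = Never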

View File

@@ -0,0 +1,170 @@
from __future__ import annotations
import logging
import os
import toml
import yaml
from copy import deepcopy
from typing import ClassVar, Optional, Mapping, Union
from ..config.state import config
from ..constants import Arch, BASE_DISTROS, KUPFER_HTTPS, REPOS_CONFIG_FILE, REPOSITORIES
from ..dictscheme import DictScheme, toml_inline_dicts, TomlPreserveInlineDictEncoder
from ..utils import sha256sum
REPOS_KEY = 'repos'
REMOTEURL_KEY = 'remote_url'
LOCALONLY_KEY = 'local_only'
OPTIONS_KEY = 'options'
BASEDISTROS_KEY = 'base_distros'
_current_config: Optional[ReposConfigFile]
class AbstrRepoConfig(DictScheme):
options: Optional[dict[str, str]]
_strip_hidden: ClassVar[bool] = True
_sparse: ClassVar[bool] = True
class BaseDistroRepo(AbstrRepoConfig):
remote_url: Optional[str]
class RepoConfig(AbstrRepoConfig):
remote_url: Optional[Union[str, dict[Arch, str]]]
local_only: Optional[bool]
class BaseDistro(DictScheme):
remote_url: Optional[str]
repos: dict[str, BaseDistroRepo]
class ReposConfigFile(DictScheme):
remote_url: Optional[str]
repos: dict[str, RepoConfig]
base_distros: dict[Arch, BaseDistro]
_path: Optional[str]
_checksum: Optional[str]
_strip_hidden: ClassVar[bool] = True
_sparse: ClassVar[bool] = True
def __init__(self, d, **kwargs):
super().__init__(d=d, **kwargs)
self[REPOS_KEY] = self.get(REPOS_KEY, {})
for repo_cls, defaults, repos, remote_url in [
(RepoConfig, REPO_DEFAULTS, self.get(REPOS_KEY), d.get(REMOTEURL_KEY, None)),
*[(BaseDistroRepo, BASE_DISTRO_DEFAULTS, _distro.repos, _distro.get(REMOTEURL_KEY, None)) for _distro in self.base_distros.values()],
]:
if repos is None:
continue
for name, repo in repos.items():
    repo = repo or {}  # a repo entry may be empty/None in the YAML
    _repo = dict(defaults | repo)  # type: ignore[operator]
    if REMOTEURL_KEY not in repo and not repo.get(LOCALONLY_KEY, None):
        _repo[REMOTEURL_KEY] = remote_url
    repos[name] = repo_cls(_repo, **kwargs)
@staticmethod
def parse_config(path: str) -> ReposConfigFile:
try:
with open(path, 'r') as fd:
data = yaml.safe_load(fd)
data['_path'] = path
data['_checksum'] = sha256sum(path)
return ReposConfigFile(data, validate=True)
except Exception as ex:
logging.error(f'Error parsing repos config at "{path}":\n{ex}')
raise ex
def toToml(self, strip_hidden=None, sparse=None, encoder=TomlPreserveInlineDictEncoder()):
d = self.toDict(strip_hidden=strip_hidden, sparse=sparse)
for key in [REPOS_KEY]:
if key not in d or not isinstance(d[key], Mapping):
continue
inline = {name: {k: toml_inline_dicts(v) for k, v in value.items()} for name, value in d[key].items()}
logging.info(f"Inlined {key}: {inline}")
d[key] = inline
return toml.dumps(d, encoder=encoder)
REPO_DEFAULTS = {
LOCALONLY_KEY: None,
REMOTEURL_KEY: None,
OPTIONS_KEY: {
'SigLevel': 'Never'
},
}
BASE_DISTRO_DEFAULTS = {
REMOTEURL_KEY: None,
OPTIONS_KEY: None,
}
REPOS_CONFIG_DEFAULT = ReposConfigFile({
'_path': '__DEFAULTS__',
'_checksum': None,
REMOTEURL_KEY: KUPFER_HTTPS,
REPOS_KEY: {
'kupfer_local': REPO_DEFAULTS | {
LOCALONLY_KEY: True
},
**{
r: deepcopy(REPO_DEFAULTS) for r in REPOSITORIES
},
},
BASEDISTROS_KEY: {
arch: {
REMOTEURL_KEY: None,
'repos': {
k: {
'remote_url': v
} for k, v in arch_def['repos'].items()
},
} for arch, arch_def in BASE_DISTROS.items()
},
})
_current_config = None
def get_repo_config(
initialize_pkgbuilds: bool = False,
repo_config_file: Optional[str] = None,
) -> tuple[ReposConfigFile, bool]:
global _current_config
repo_config_file_default = os.path.join(config.get_path('pkgbuilds'), REPOS_CONFIG_FILE)
if repo_config_file is None:
repo_config_file_path = repo_config_file_default
else:
repo_config_file_path = repo_config_file
config_exists = os.path.exists(repo_config_file_path)
if not config_exists and _current_config is None:
if initialize_pkgbuilds:
from ..packages.pkgbuild import init_pkgbuilds
init_pkgbuilds(update=False)
return get_repo_config(initialize_pkgbuilds=False, repo_config_file=repo_config_file)
if repo_config_file is not None:
raise Exception(f"Requested repo config {repo_config_file} doesn't exist")
logging.warning(f"{repo_config_file_path} doesn't exist, using built-in repo config defaults")
_current_config = deepcopy(REPOS_CONFIG_DEFAULT)
return _current_config, False
changed = False
if (not _current_config) or (config_exists and _current_config._checksum != sha256sum(repo_config_file_path)):
if config_exists:
conf = ReposConfigFile.parse_config(repo_config_file_path)
else:
conf = REPOS_CONFIG_DEFAULT
changed = conf != (_current_config or {})
if changed:
_current_config = deepcopy(conf)
else:
logging.debug("Repo config: Cache hit!")
assert _current_config
return _current_config, changed
def get_repos(**kwargs) -> list[RepoConfig]:
config, _ = get_repo_config(**kwargs)
return list(config.repos.values())
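# Hedged sketch of a repos.yml this module would accept (keys follow the
# RepoConfig/BaseDistro schemes above; repo names and URLs are examples only):
#
#   remote_url: https://gitlab.com/kupfer/packages/prebuilts/-/raw/%kupfer_branch%/$arch/$repo
#   repos:
#     kupfer_local:
#       local_only: true
#     main: {}
#     cross:
#       options:
#         SigLevel: Never
#   base_distros:
#     aarch64:
#       repos:
#         core: {}
#
# Parsed via ReposConfigFile.parse_config(path); missing per-repo fields are
# filled in from REPO_DEFAULTS and the top-level remote_url in __init__ above.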

View File


@@ -0,0 +1,134 @@
import logging
import os
import pwd
import subprocess
from subprocess import CompletedProcess # make it easy for users of this module
from shlex import quote as shell_quote
from typing import IO, Optional, Union
from kupferbootstrap.typehelpers import TypeAlias
ElevationMethod: TypeAlias = str
FileDescriptor: TypeAlias = Union[int, IO]
# as long as **only** sudo is supported, hardcode the default into ELEVATION_METHOD_DEFAULT.
# when other methods are added, all mentions of ELEVATION_METHOD_DEFAULT should be replaced by a config key.
ELEVATION_METHOD_DEFAULT = "sudo"
ELEVATION_METHODS: dict[ElevationMethod, list[str]] = {
"none": [],
"sudo": ['sudo', '--'],
}
def generate_env_cmd(env: dict[str, str]):
return ['/usr/bin/env'] + [f'{key}={value}' for key, value in env.items()]
def flatten_shell_script(script: Union[list[str], str], shell_quote_items: bool = False, wrap_in_shell_quote=False) -> str:
"""
takes a shell-script and returns a flattened string for consumption with `sh -c`.
`shell_quote_items` should only be used on `script` arrays that have no shell magic anymore,
e.g. `['bash', '-c', 'echo $USER']`, which would return the string `bash -c 'echo $USER'`
(shlex.quote() leaves tokens without special characters unquoted),
which is suited for consumption by another `bash -c` process.
"""
if not isinstance(script, str) and isinstance(script, list):
cmds = script
if shell_quote_items:
cmds = [shell_quote(i) for i in cmds]
else:
cmds = [(i if i != '' else '""') for i in cmds]
script = " ".join(cmds)
if wrap_in_shell_quote:
script = shell_quote(script)
return script
def wrap_in_bash(cmd: Union[list[str], str], flatten_result=True) -> Union[str, list[str]]:
res: Union[str, list[str]] = ['/bin/bash', '-c', flatten_shell_script(cmd, shell_quote_items=False, wrap_in_shell_quote=False)]
if flatten_result:
res = flatten_shell_script(res, shell_quote_items=True, wrap_in_shell_quote=False)
return res
def generate_cmd_elevated(cmd: Union[list[str], str], elevation_method: ElevationMethod):
"wraps `cmd` in the necessary commands to escalate, e.g. `['sudo', '--', cmd]`."
if isinstance(cmd, str):
cmd = wrap_in_bash(cmd, flatten_result=False)
assert not isinstance(cmd, str)  # type-hints cmd as list[str]
if elevation_method not in ELEVATION_METHODS:
raise Exception(f"Unknown elevation method {elevation_method}")
return ELEVATION_METHODS[elevation_method] + cmd
def generate_cmd_su(
cmd: Union[list[str], str],
switch_user: str,
elevation_method: Optional[ElevationMethod] = None,
force_su: bool = False,
force_elevate: bool = False,
):
"""
returns cmd to escalate (e.g. sudo) and switch users (su) to run `cmd` as `switch_user` as necessary.
If `switch_user` is neither the current user nor root, cmd will have to be flattened into a single string.
A result might look like `['sudo', '--', 'su', '-s', '/bin/bash', '-c', cmd_as_a_string]`.
"""
current_uid = os.getuid()
if pwd.getpwuid(current_uid).pw_name != switch_user or force_su:
if switch_user != 'root' or force_su:
cmd = ['/bin/su', switch_user, '-s', '/bin/bash', '-c', flatten_shell_script(cmd, shell_quote_items=True)]
if current_uid != 0 or force_elevate: # in order to use `/bin/su`, we have to be root first.
cmd = generate_cmd_elevated(cmd, elevation_method or ELEVATION_METHOD_DEFAULT)
return cmd
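# Worked example (a sketch) of the wrapping described in the docstring above,
# assuming the current user is neither root nor 'builder':
#   generate_cmd_su(['ls', '/root'], 'builder')
#   -> ['sudo', '--', '/bin/su', 'builder', '-s', '/bin/bash', '-c', 'ls /root']
# (shlex.quote() leaves tokens without special characters unquoted, hence the
# plain 'ls /root' string handed to bash -c.)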
def run_cmd(
script: Union[str, list[str]],
env: dict[str, str] = {},
attach_tty: bool = False,
capture_output: bool = False,
cwd: Optional[str] = None,
switch_user: Optional[str] = None,
elevation_method: Optional[ElevationMethod] = None,
stdout: Optional[FileDescriptor] = None,
stderr: Optional[FileDescriptor] = None,
) -> Union[CompletedProcess, int]:
"execute `script` as `switch_user`, elevating and su'ing as necessary"
kwargs: dict = {}
env_cmd = []
if env:
env_cmd = generate_env_cmd(env)
kwargs['env'] = env
if not attach_tty:
if (stdout, stderr) == (None, None):
kwargs['capture_output'] = capture_output
else:
for name, fd in {'stdout': stdout, 'stderr': stderr}.items():
if fd is not None:
kwargs[name] = fd
script = flatten_shell_script(script)
if cwd:
kwargs['cwd'] = cwd
wrapped_script: list[str] = wrap_in_bash(script, flatten_result=False) # type: ignore
cmd = env_cmd + wrapped_script
if switch_user:
cmd = generate_cmd_su(cmd, switch_user, elevation_method=elevation_method)
logging.debug(f'Running cmd: "{cmd}"' + (f' (path: {repr(cwd)})' if cwd else ''))
if attach_tty:
return subprocess.call(cmd, **kwargs)
else:
return subprocess.run(cmd, **kwargs)
def run_root_cmd(*kargs, **kwargs):
kwargs['switch_user'] = 'root'
return run_cmd(*kargs, **kwargs)
def elevation_noop(**kwargs):
run_root_cmd('/bin/true', **kwargs)
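
A minimal usage sketch for this module, assuming it is importable as kupferbootstrap.exec.cmd (the exact package path is not shown in this diff):

from kupferbootstrap.exec.cmd import run_cmd, run_root_cmd

# Lists are flattened and wrapped in `bash -c`; with attach_tty=False a
# subprocess.CompletedProcess is returned.
result = run_cmd(['echo', 'hello'], capture_output=True)
assert not isinstance(result, int)
print(result.stdout.decode().strip())  # -> hello

# Strings are passed to bash as-is; switch_user='root' additionally wraps the
# command in `sudo -- su root -s /bin/bash -c ...` as necessary.
run_root_cmd('whoami > /tmp/whoami.txt', env={'LANG': 'C'})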

View File

@@ -0,0 +1,188 @@
import atexit
import logging
import os
import stat
import subprocess
from shutil import rmtree
from tempfile import mkdtemp
from typing import Optional, Union
from .cmd import run_cmd, run_root_cmd, elevation_noop, generate_cmd_su, wrap_in_bash, shell_quote
from kupferbootstrap.utils import get_user_name, get_group_name
def try_native_filewrite(path: str, content: Union[str, bytes], chmod: Optional[str] = None) -> Optional[Exception]:
"try writing with python open(), return None on success, return(!) Exception on failure"
bflag = 'b' if isinstance(content, bytes) else ''
    try:
        kwargs = {}
        if chmod:
            kwargs['mode'] = int(chmod, 8)  # os.open() takes the mode as an int, not an octal string
        descriptor = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, **kwargs)
        with open(descriptor, 'w' + bflag) as f:
            f.write(content)
except Exception as ex:
return ex
return None
def chown(path: str, user: Optional[Union[str, int]] = None, group: Optional[Union[str, int]] = None, recursive: bool = False):
owner = ''
if user is not None:
owner += get_user_name(user)
if group is not None:
owner += f':{get_group_name(group)}'
if owner:
cmd = ["chown"] + (['-R'] if recursive else [])
result = run_root_cmd(cmd + [owner, path])
assert isinstance(result, subprocess.CompletedProcess)
if result.returncode:
raise Exception(f"Failed to change owner of '{path}' to '{owner}'")
def chmod(path, mode: Union[int, str] = 0o0755, force_sticky=True, privileged: bool = True):
if not isinstance(mode, str):
octal = oct(mode)[2:]
else:
octal = mode
assert octal.isnumeric()
octal = octal.rjust(3, '0')
if force_sticky:
octal = octal.rjust(4, '0')
    try:
        os.chmod(path, mode=int(octal, 8))  # os.chmod() takes an int mode
    except Exception:
        cmd = ["chmod", octal, path]
        result = run_cmd(cmd, switch_user='root' if privileged else None)
        assert isinstance(result, subprocess.CompletedProcess)
        if result.returncode:
            raise Exception(f"Failed to set mode of '{path}' to '{octal}'")
def root_check_exists(path):
return os.path.exists(path) or run_root_cmd(['[', '-e', path, ']']).returncode == 0
def root_check_is_dir(path):
    return os.path.isdir(path) or run_root_cmd(['[', '-d', path, ']']).returncode == 0
def write_file(
path: str,
content: Union[str, bytes],
lazy: bool = True,
mode: Optional[str] = None,
user: Optional[str] = None,
group: Optional[str] = None,
):
chmod_mode = ''
chown_user = get_user_name(user) if user else None
chown_group = get_group_name(group) if group else None
fstat: os.stat_result
exists = root_check_exists(path)
dirname = os.path.dirname(path)
failed = False
if exists:
try:
fstat = os.stat(path)
except PermissionError:
failed = True
else:
chown_user = chown_user or get_user_name(os.getuid())
chown_group = chown_group or get_group_name(os.getgid())
dir_exists = root_check_exists(dirname)
if not dir_exists or not root_check_is_dir(dirname):
reason = "is not a directory" if dir_exists else "does not exist"
raise Exception(f"Error writing file {path}, parent dir {reason}")
if mode:
if not mode.isnumeric():
raise Exception(f"Unknown file mode '{mode}' (must be numeric): {path}")
if not exists or failed or stat.filemode(int(mode, 8)) != stat.filemode(fstat.st_mode):
chmod_mode = mode
if not failed:
failed = try_native_filewrite(path, content, chmod_mode) is not None
if exists or failed:
if failed:
try:
elevation_noop(attach_tty=True) # avoid password prompt while writing file
logging.debug(f"Writing to {path} using elevated /bin/tee")
cmd: list[str] = generate_cmd_su(wrap_in_bash(f'tee {shell_quote(path)} >/dev/null', flatten_result=False), 'root') # type: ignore
assert isinstance(cmd, list)
s = subprocess.Popen(
cmd,
text=(not isinstance(content, bytes)),
stdin=subprocess.PIPE,
)
s.communicate(content)
s.wait(300) # 5 minute timeout
if s.returncode:
raise Exception(f"Write command excited non-zero: {s.returncode}")
except Exception as ex:
logging.fatal(f"Writing to file '{path}' with elevated privileges failed")
raise ex
if chmod_mode:
chmod(path, chmod_mode)
chown(path, chown_user, chown_group)
def root_write_file(*args, **kwargs):
kwargs['user'] = 'root'
kwargs['group'] = 'root'
return write_file(*args, **kwargs)
def remove_file(path: str, recursive=False):
try:
rm = rmtree if recursive else os.unlink
rm(path) # type: ignore
    except Exception:
cmd = ['rm'] + (['-r'] if recursive else []) + [path]
rc = run_root_cmd(cmd).returncode
if rc:
raise Exception(f"Unable to remove {path}: cmd returned {rc}")
def makedir(
path,
user: Optional[Union[str, int]] = None,
group: Optional[Union[str, int]] = None,
parents: bool = True,
mode: Optional[Union[int, str]] = None,
):
if not root_check_exists(path):
try:
if parents:
os.makedirs(path, exist_ok=True)
else:
os.mkdir(path)
        except Exception:
run_root_cmd(['mkdir'] + (['-p'] if parents else []) + [path])
if mode is not None:
chmod(path, mode=mode)
chown(path, user, group)
def root_makedir(path, parents: bool = True):
return makedir(path, user='root', group='root', parents=parents)
def symlink(source, target):
"Create a symlink at `target`, pointing at `source`"
try:
os.symlink(source, target)
    except Exception:
result = run_root_cmd(['ln', '-s', source, target])
assert isinstance(result, subprocess.CompletedProcess)
if result.returncode:
raise Exception(f'Symlink creation of {target} pointing at {source} failed')
def get_temp_dir(register_cleanup=True, mode: int = 0o0755):
"create a new tempdir and sanitize ownership so root can access user files as god intended"
t = mkdtemp()
chmod(t, mode, privileged=False)
if register_cleanup:
atexit.register(remove_file, t, recursive=True)
return t
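
A short usage sketch for these helpers (the module path kupferbootstrap.exec.file is assumed; it is not shown in this diff):

from kupferbootstrap.exec.file import get_temp_dir, makedir, root_write_file, write_file

d = get_temp_dir()  # registers an atexit cleanup handler by default
makedir(f'{d}/etc', user='root', group='root', mode='755')
write_file(f'{d}/notes.txt', 'hello\n')  # native write as the current user
# root_write_file() falls back to elevated `tee` if the native write fails:
root_write_file(f'{d}/etc/hostname', 'kupfer\n', mode='644')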

View File

@@ -0,0 +1,72 @@
import logging
import os
import pwd
import subprocess
from typing import Optional
from .cmd import run_cmd, run_root_cmd, generate_cmd_su
def get_username(id: int):
return pwd.getpwuid(id).pw_name
def run_func(f, expected_user: Optional[str] = None, **kwargs):
current_uid = os.getuid()
current_username = get_username(current_uid)
target_uid = current_uid
result = f(['id', '-u'], capture_output=True, **kwargs)
assert isinstance(result, subprocess.CompletedProcess)
result.check_returncode()
if expected_user and current_username != expected_user:
target_uid = pwd.getpwnam(expected_user).pw_uid
result_uid = result.stdout.decode()
assert int(result_uid) == target_uid
def run_generate_and_exec(script, generate_args={}, switch_user=None, **kwargs):
"runs generate_cmd_su() and executes the resulting argv"
if not switch_user:
switch_user = get_username(os.getuid())
cmd = generate_cmd_su(script, switch_user=switch_user, **generate_args)
logging.debug(f'run_generate_and_exec: running {cmd}')
return subprocess.run(
cmd,
**kwargs,
)
def test_generate_su_force_su():
run_func(run_generate_and_exec, generate_args={'force_su': True})
def test_generate_su_force_elevate():
run_func(run_generate_and_exec, generate_args={'force_elevate': True}, expected_user='root', switch_user='root')
def test_generate_su_nobody_force_su():
user = 'nobody'
run_func(run_generate_and_exec, expected_user=user, switch_user=user, generate_args={'force_su': True})
def test_generate_su_nobody_force_su_and_elevate():
user = 'nobody'
run_func(run_generate_and_exec, expected_user=user, switch_user=user, generate_args={'force_su': True, 'force_elevate': True})
def test_run_cmd():
run_func(run_cmd)
def test_run_cmd_su_nobody():
user = 'nobody'
run_func(run_cmd, expected_user=user, switch_user=user)
def test_run_cmd_as_root():
run_func(run_cmd, expected_user='root', switch_user='root')
def test_run_root_cmd():
run_func(run_root_cmd, expected_user='root')
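
These tests spawn real `sudo` and `su` processes, so they presumably need passwordless sudo and an existing `nobody` user (e.g. inside the project's CI image). A hypothetical invocation, with the test path assumed from the new src/ layout:

import pytest

# Equivalent to `pytest -v src/kupferbootstrap/exec/test_cmd.py` on the CLI.
pytest.main(['-v', 'src/kupferbootstrap/exec/test_cmd.py'])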

View File

@@ -0,0 +1,181 @@
import pytest
import os
import stat
from typing import Union, Generator
from dataclasses import dataclass
from .cmd import run_root_cmd
from .file import chmod, chown, get_temp_dir, write_file
from kupferbootstrap.utils import get_gid, get_uid
TEMPDIR_MODE = 0o755
@dataclass
class TempdirFillInfo():
path: str
files: dict[str, str]
def _get_tempdir():
d = get_temp_dir(register_cleanup=False, mode=TEMPDIR_MODE)
assert os.path.exists(d)
return d
def remove_dir(d):
run_root_cmd(['rm', '-rf', d]).check_returncode()
def create_file(filepath, owner='root', group='root'):
assert not os.path.exists(filepath)
run_root_cmd(['touch', filepath]).check_returncode()
run_root_cmd(['chown', f'{owner}:{group}', filepath]).check_returncode()
@pytest.fixture
def tempdir():
d = _get_tempdir()
yield d
# cleanup, gets run after the test since we yield above
remove_dir(d)
def test_get_tempdir(tempdir):
mode = os.stat(tempdir).st_mode
assert stat.S_ISDIR(mode)
assert stat.S_IMODE(mode) == TEMPDIR_MODE
@pytest.fixture
def tempdir_filled() -> Generator[TempdirFillInfo, None, None]:
d = _get_tempdir()
contents = {
'rootfile': {
'owner': 'root',
'group': 'root',
},
'userfile': {
'owner': 'nobody',
'group': 'nobody',
},
}
res = TempdirFillInfo(path=d, files={})
for p, opts in contents.items():
path = os.path.join(d, p)
res.files[p] = path
create_file(path, **opts)
yield res
# cleanup, gets run after the test since we yield above
remove_dir(d)
def verify_ownership(filepath, user: Union[str, int], group: Union[str, int]):
uid = get_uid(user)
gid = get_gid(group)
assert os.path.exists(filepath)
fstat = os.stat(filepath)
assert fstat.st_uid == uid
assert fstat.st_gid == gid
def verify_mode(filepath, mode: int = TEMPDIR_MODE):
assert stat.S_IMODE(os.stat(filepath).st_mode) == mode
def verify_content(filepath, content):
assert os.path.exists(filepath)
with open(filepath, 'r') as f:
assert f.read().strip() == content.strip()
@pytest.mark.parametrize("user,group", [('root', 'root'), ('nobody', 'nobody')])
def test_chown(tempdir: str, user: str, group: str):
assert os.path.exists(tempdir)
target_uid = get_uid(user)
target_gid = get_gid(group)
chown(tempdir, target_uid, target_gid)
verify_ownership(tempdir, target_uid, target_gid)
@pytest.mark.parametrize("mode", [0, 0o700, 0o755, 0o600, 0o555])
def test_chmod(tempdir_filled, mode: int):
for filepath in tempdir_filled.files.values():
chmod(filepath, mode)
verify_mode(filepath, mode)
def test_tempdir_filled_fixture(tempdir_filled: TempdirFillInfo):
files = tempdir_filled.files
assert files
assert 'rootfile' in files
assert 'userfile' in files
verify_ownership(files['rootfile'], 'root', 'root')
verify_ownership(files['userfile'], 'nobody', 'nobody')
def test_write_new_file_naive(tempdir: str):
assert os.path.exists(tempdir)
new = os.path.join(tempdir, 'newfiletest')
content = 'test12345'
assert not os.path.exists(new)
write_file(new, content)
verify_content(new, content)
verify_ownership(new, user=os.getuid(), group=os.getgid())
def test_write_new_file_root(tempdir: str):
assert os.path.exists(tempdir)
new = os.path.join(tempdir, 'newfiletest')
content = 'test12345'
assert not os.path.exists(new)
write_file(new, content, user='root', group='root')
verify_content(new, content)
verify_ownership(new, user=0, group=0)
def test_write_new_file_user(tempdir: str):
user = 'nobody'
group = 'nobody'
assert os.path.exists(tempdir)
new = os.path.join(tempdir, 'newfiletest')
content = 'test12345'
assert not os.path.exists(new)
write_file(new, content, user=user, group=group)
assert os.path.exists(new)
verify_content(new, content)
verify_ownership(new, user=user, group=group)
def test_write_new_file_user_in_root_dir(tempdir: str):
assert os.path.exists(tempdir)
chown(tempdir, user='root', group='root')
verify_ownership(tempdir, 'root', 'root')
test_write_new_file_user(tempdir)
def test_write_rootfile_naive(tempdir_filled: TempdirFillInfo):
files = tempdir_filled.files
assert 'rootfile' in files
p = files['rootfile']
assert os.path.exists(p)
verify_ownership(p, 'root', 'root')
content = 'test123'
write_file(p, content)
verify_content(p, 'test123')
verify_ownership(p, 'root', 'root')
@pytest.mark.parametrize("user,group", [('root', 'root'), ('nobody', 'nobody')])
def test_write_rootfile(tempdir_filled: TempdirFillInfo, user: str, group: str):
files = tempdir_filled.files
assert 'rootfile' in files
p = files['rootfile']
assert os.path.exists(p)
verify_ownership(p, 'root', 'root')
    content = 'test123'
    write_file(p, content, user=user, group=group)
    verify_content(p, content)
    verify_ownership(p, user, group)

View File

View File

@@ -0,0 +1,71 @@
import click
import logging
from json import dumps as json_dump
from typing import Optional
from kupferbootstrap.config.cli import resolve_profile_field
from kupferbootstrap.config.state import config
from kupferbootstrap.utils import color_mark_selected, colors_supported
from .flavour import get_flavours, get_flavour
profile_option = click.option('-p', '--profile', help="name of the profile to use", required=False, default=None)
@click.command(name='flavours')
@click.option('-j', '--json', is_flag=True, help='output machine-parsable JSON format')
@click.option('--output-file', type=click.Path(exists=False, file_okay=True), help="Dump JSON to file")
def cmd_flavours(json: bool = False, output_file: Optional[str] = None):
'list information about available flavours'
results = []
json_results = {}
profile_flavour = None
flavours = get_flavours()
interactive_json = json and not output_file
use_colors = colors_supported(config.runtime.colors) and not interactive_json
profile_name = config.file.profiles.current
selected, inherited_from = None, None
if output_file:
json = True
if not flavours:
raise Exception("No flavours found!")
if not interactive_json:
try:
selected, inherited_from = resolve_profile_field(None, profile_name, 'flavour', config.file.profiles)
if selected:
profile_flavour = get_flavour(selected)
except Exception as ex:
logging.debug(f"Failed to get profile flavour for marking as currently selected, continuing anyway. Exception: {ex}")
for name in sorted(flavours.keys()):
f = flavours[name]
try:
f.parse_flavourinfo()
except Exception as ex:
logging.debug(f"A problem happened while parsing flavourinfo for {name}, continuing anyway. Exception: {ex}")
if not interactive_json:
snippet = f.nice_str(newlines=True, colors=use_colors)
if profile_flavour == f:
snippet = color_mark_selected(snippet, profile_name or '[unknown]', inherited_from)
snippet += '\n'
results += snippet.split('\n')
if json:
d = dict(f)
d["description"] = f.flavour_info.description if (f.flavour_info and f.flavour_info.description) else f.description
if "flavour_info" in d and d["flavour_info"]:
for k in set(d["flavour_info"].keys()) - set(['description']):
d[k] = d["flavour_info"][k]
del d["flavour_info"]
d["pkgbuild"] = f.pkgbuild.path if f.pkgbuild else None
d["package"] = f.pkgbuild.name
d["arches"] = sorted(f.pkgbuild.arches) if f.pkgbuild else None
json_results[name] = d
print()
if output_file:
with open(output_file, 'w') as fd:
fd.write(json_dump(json_results))
if interactive_json:
print(json_dump(json_results, indent=4))
else:
for r in results:
print(r)
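
A hypothetical smoke test for this subcommand using click's test runner; it assumes a configured pkgbuilds checkout so flavours can be discovered, and the module path is inferred:

from click.testing import CliRunner
from kupferbootstrap.flavours.cli import cmd_flavours  # module path assumed

runner = CliRunner()
result = runner.invoke(cmd_flavours, ['--json'])
print(result.output)  # machine-parsable JSON keyed by flavour name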

View File

@@ -0,0 +1,129 @@
from __future__ import annotations
import json
import logging
import os
from typing import Optional
from kupferbootstrap.config.state import config
from kupferbootstrap.constants import FLAVOUR_DESCRIPTION_PREFIX, FLAVOUR_INFO_FILE
from kupferbootstrap.dictscheme import DictScheme
from kupferbootstrap.packages.pkgbuild import discover_pkgbuilds, get_pkgbuild_by_name, init_pkgbuilds, Pkgbuild
from kupferbootstrap.utils import color_str
class FlavourInfo(DictScheme):
rootfs_size: int # rootfs size in GB
description: Optional[str]
def __repr__(self):
return f'rootfs_size: {self.rootfs_size}'
class Flavour(DictScheme):
name: str
pkgbuild: Pkgbuild
description: str
flavour_info: Optional[FlavourInfo]
@staticmethod
def from_pkgbuild(pkgbuild: Pkgbuild) -> Flavour:
name = pkgbuild.name
        if not name.startswith('flavour-'):
            raise Exception(f'Flavour package name "{name}" doesn\'t start with "flavour-"')
        if name.endswith('-common'):
            raise Exception(f'Flavour package name "{name}" ends with "-common"')
name = name[8:] # split off 'flavour-'
description = pkgbuild.description
# cut off FLAVOUR_DESCRIPTION_PREFIX
if description.lower().startswith(FLAVOUR_DESCRIPTION_PREFIX.lower()):
description = description[len(FLAVOUR_DESCRIPTION_PREFIX):]
return Flavour(name=name, pkgbuild=pkgbuild, description=description.strip(), flavour_info=None)
def __repr__(self):
return f'Flavour<"{self.name}": "{self.description}", package: {self.pkgbuild.name if self.pkgbuild else "??? PROBABLY A BUG!"}{f", {self.flavour_info}" if self.flavour_info else ""}>'
def __str__(self):
return self.nice_str()
def nice_str(self, newlines: bool = False, colors: bool = False) -> str:
separator = '\n' if newlines else ', '
def get_lines(k, v, key_prefix=''):
results = []
full_k = f'{key_prefix}.{k}' if key_prefix else k
if not isinstance(v, (dict, DictScheme)):
results = [f'{color_str(full_k, bold=True)}: {v}']
else:
for _k, _v in v.items():
if _k.startswith('_'):
continue
results += get_lines(_k, _v, key_prefix=full_k)
return results
return separator.join(get_lines(None, self))
def parse_flavourinfo(self, lazy: bool = True):
if lazy and self.flavour_info is not None:
return self.flavour_info
infopath = os.path.join(config.get_path('pkgbuilds'), self.pkgbuild.path, FLAVOUR_INFO_FILE)
if not os.path.exists(infopath):
raise Exception(f"Error parsing flavour info for flavour {self.name}: file doesn't exist: {infopath}")
try:
defaults = {'description': None}
with open(infopath, 'r') as fd:
infodict = json.load(fd)
i = FlavourInfo(**(defaults | infodict))
except Exception as ex:
raise Exception(f"Error parsing {FLAVOUR_INFO_FILE} for flavour {self.name}: {ex}")
self.flavour_info = i
if i.description:
self.description = i.description
return i
_flavours_discovered: bool = False
_flavours_cache: dict[str, Flavour] = {}
def get_flavours(lazy: bool = True):
global _flavours_cache, _flavours_discovered
if lazy and _flavours_discovered:
return _flavours_cache
logging.info("Searching PKGBUILDs for flavour packages")
flavours: dict[str, Flavour] = {}
pkgbuilds: dict[str, Pkgbuild] = discover_pkgbuilds(lazy=(lazy or not _flavours_discovered))
for pkg in pkgbuilds.values():
name = pkg.name
if not name.startswith('flavour-') or name.endswith('-common'):
continue
name = name[8:] # split off 'flavour-'
logging.info(f"Found flavour package {name}")
flavours[name] = Flavour.from_pkgbuild(pkg)
_flavours_cache.clear()
_flavours_cache.update(flavours)
_flavours_discovered = True
return flavours
def get_flavour(name: str, lazy: bool = True):
global _flavours_cache
pkg_name = f'flavour-{name}'
if lazy and name in _flavours_cache:
return _flavours_cache[name]
try:
logging.info(f"Trying to find PKGBUILD for flavour {name}")
init_pkgbuilds()
pkg = get_pkgbuild_by_name(pkg_name)
except Exception as ex:
raise Exception(f"Error parsing PKGBUILD for flavour package {pkg_name}:\n{ex}")
assert pkg and pkg.name == pkg_name
flavour = Flavour.from_pkgbuild(pkg)
_flavours_cache[name] = flavour
return flavour
def get_profile_flavour(profile_name: Optional[str] = None) -> Flavour:
profile = config.enforce_profile_flavour_set(profile_name=profile_name)
return get_flavour(profile.flavour)
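
A sketch of the programmatic API above (module path assumed; requires a configured pkgbuilds checkout):

from kupferbootstrap.flavours.flavour import get_flavour, get_flavours

flavours = get_flavours()         # discovers flavour-* PKGBUILDs, cached after the first call
phosh = get_flavour('phosh')      # resolves the PKGBUILD named "flavour-phosh"
info = phosh.parse_flavourinfo()  # reads FLAVOUR_INFO_FILE next to the PKGBUILD
print(phosh.nice_str(newlines=True), info.rootfs_size)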

View File

@@ -0,0 +1,29 @@
import pytest
from .flavour import Flavour, get_flavour, get_flavours
FLAVOUR_NAME = 'phosh'
@pytest.fixture()
def flavour(name=FLAVOUR_NAME) -> Flavour:
return get_flavour(name)
def test_get_flavour(flavour: Flavour):
assert isinstance(flavour, Flavour)
assert flavour.name
assert flavour.pkgbuild
def test_parse_flavourinfo(flavour: Flavour):
info = flavour.parse_flavourinfo()
assert isinstance(info.rootfs_size, int)
# rootfs_size should not be zero
assert info.rootfs_size
def test_get_flavours():
flavours = get_flavours()
assert flavours
assert FLAVOUR_NAME in flavours

View File

@@ -1,13 +1,16 @@
-from constants import Arch, GCC_HOSTSPECS, CFLAGS_GENERAL, CFLAGS_ARCHES, COMPILE_ARCHES, CHROOT_PATHS
-from config import config
+from typing import Optional
+from .constants import Arch, CFLAGS_ARCHES, CFLAGS_GENERAL, COMPILE_ARCHES, GCC_HOSTSPECS
+from .config.state import config
-def generate_makepkg_conf(arch: Arch, cross: bool = False, chroot: str = None) -> str:
+def generate_makepkg_conf(arch: Arch, cross: bool = False, chroot: Optional[str] = None) -> str:
     """
     Generate a makepkg.conf. For use with crosscompiling, specify `cross=True` and pass as `chroot`
     the relative path inside the native chroot where the foreign chroot will be mounted.
     """
-    hostspec = GCC_HOSTSPECS[config.runtime['arch'] if cross else arch][arch]
+    assert config.runtime.arch
+    hostspec = GCC_HOSTSPECS[config.runtime.arch if cross else arch][arch]
     cflags = CFLAGS_ARCHES[arch] + CFLAGS_GENERAL
     if cross and not chroot:
         raise Exception(f'Cross-compile makepkg conf requested but no chroot path given: "{chroot}"')
@@ -193,7 +196,9 @@ export LDFLAGS="$LDFLAGS,-L/usr/{hostspec}/lib,-L/{chroot}/usr/lib,-rpath-link,/
 def generate_pacman_conf_body(
     arch: Arch,
     check_space: bool = True,
+    in_chroot: bool = True,
 ):
+    pacman_cache = f"{config.get_path('pacman')}/{arch}" if not in_chroot else '/var/cache/pacman/pkg'
     return f'''
#
# /etc/pacman.conf
@@ -208,7 +213,7 @@ def generate_pacman_conf_body(
 # If you wish to use different paths, uncomment and update the paths.
 #RootDir = /
 #DBPath = /var/lib/pacman/
-CacheDir = {CHROOT_PATHS['pacman']}/{arch}
+CacheDir = {pacman_cache}
 #LogFile = /var/log/pacman.log
 #GPGDir = /etc/pacman.d/gnupg/
 #HookDir = /etc/pacman.d/hooks/
@@ -231,7 +236,7 @@ Color
 #NoProgressBar
 {'' if check_space else '#'}CheckSpace
 VerbosePkgLists
-ParallelDownloads = {config.file['pacman']['parallel_downloads']}
+ParallelDownloads = {config.file.pacman.parallel_downloads}
 # By default, pacman accepts packages signed by keys that its local keyring
 # trusts (see pacman-key and its man page), as well as unsigned packages.

View File

Some files were not shown because too many files have changed in this diff.