diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3ec1e39..32b5f7c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,8 +1,10 @@ -name: CI on SFOS 3.1.0 +name: CI on SDK for 3.1.0 on: push: tags: + # Mind that '*' does not match a slash ('/'), in contrast to '**', + # see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet - 'release*/*' - 'rc*/*' - 'beta*/*' @@ -12,13 +14,30 @@ on: - master - devel - 'v*-legacy' + # Allows to run this workflow manually from the Actions tab. + workflow_dispatch: + +defaults: + run: + # Note that 'bash' provides -o pipefail, in contrast to the default (i.e. unspecified, which also uses bash) or 'sh', + # see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsshell + shell: sh + +# Do not use concurrency in order to enforce checking every commit of a Pull Request. +# See, e.g.: https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#example-only-cancel-in-progress-jobs-or-runs-for-the-current-workflow +#concurrency: + #group: ci-${{ github.ref_name }} + # 'false' (default) allows for two concurrent runs, one executing and one freshly enqueued; 'true' for only one; no 'concurrency:' defined for multiple. + #cancel-in-progress: false jobs: build: runs-on: ubuntu-22.04 env: + # Do not wait up to the default of 10 minutes for network timeouts in a workflow which runs ca. 2 minutes. SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1 steps: + - name: Checkout uses: actions/checkout@v3 @@ -37,6 +56,9 @@ jobs: - name: Build i486 on 3.1.0.12 uses: coderus/github-sfos-build@old-stable with: + # Solely build for i486 on 3.1.0, because this is a noarch RPM and i486 matches the architecture of the host (compiling faster than cross-compilation) + # and 3.1.0 is the oldest supported release hence with the smallest Docker image to download from Docker hub. + # See also https://github.com/sailfishos-patches/patchmanager/pull/437#issuecomment-1615317003 release: 3.1.0.12 arch: i486 @@ -45,3 +67,4 @@ jobs: with: name: RPM-build-results path: RPMS/ + diff --git a/LICENSE b/LICENSE index 9555783..5e452a5 100644 --- a/LICENSE +++ b/LICENSE @@ -461,8 +461,9 @@ DAMAGES. WWW-Link to this software: https://github.com/storeman-developers/harbour-storeman-installer - Copyright (C) 2021-2022 Petr Tsymbarovich (osetr) - 2021-2023 olf + Copyright (C) 2021-2022 Petr Tsymbarovich (osetr / mentaljam) + 2021-2024 olf + 2024 Peter G. (nephros) This software is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public diff --git a/README.md b/README.md index 18596c5..ca08848 100644 --- a/README.md +++ b/README.md @@ -8,26 +8,26 @@ Starting with version 0.2.9, Storeman is built by the help of the SailfishOS-OBS The Storeman Installer works on any SailfishOS release ≥ 3.1.0 and all supported CPU-architectures (armv7hl, i486 and aarch64). The current Storeman Installer RPM can be obtained from its ["latest release" page at GitHub](https://github.com/storeman-developers/harbour-storeman-installer/releases/latest) and [OpenRepos.net](https://openrepos.net/content/olf/storeman-installer). -RPMs of [older Storeman releases are also available at OpenRepos](https://openrepos.net/content/olf/storeman-legacy), e.g., v0.1.8 which works on SailfishOS 2.2.1 and may work on older SailfishOS 2 releases. 
+RPMs of [older Storeman releases are also available at OpenRepos](https://openrepos.net/content/olf/storeman-legacy), e.g. v0.1.8 which works on SailfishOS 2.2.1 and may work on older SailfishOS 2 releases. ### Important notes * If you experience issues with Storeman Installer, please take a look at its log file `/var/log/harbour-storeman-installer.log.txt`. If that does not reveal to you what is going wrong, please check first if an issue report describing this issue is [already filed at GitHub](https://github.com/storeman-developers/harbour-storeman-installer/issues), then you might file a new issue report there and attach the log file to it, or enhance an extant bug report. * If you experience issues when installing, removing or updating packages after a SailfishOS upgrade, try running `devel-su pkcon refresh` in a terminal app. * When Storeman Installer fails to install anything (i.e. a minute after installing it, the icon of Storeman has not appeared on the launcher / desktop), most likely the preceding or the following bullet point is the reason. -* Before software can be build for a SailfishOS release at the SailfishOS-OBS, Jolla must create a [corresponding "download on demand (DoD)" OBS-repository](https://build.merproject.org/project/subprojects/sailfishos). It may take a little time after a new SailfishOS release is published before the corresponding "DoD" repository is being made available, during which installing Storeman by the Storeman Installer or updating Storeman by itself on a device with the new SailfishOS release already installed does not work, because Storeman cannot be compiled for this new SailfishOS release at the Sailfish-OBS, yet; consequently this is always the case for "closed beta (cBeta)" releases of SailfishOS. In such a situation one has to manually download Storeman built for the last prior SailfishOS "general availability (GA)" release (e.g., from [its releases section at GitHub](https://github.com/storeman-developers/harbour-storeman/releases) or [the SailfishOS-OBS](https://build.merproject.org/project/show/home:olf:harbour-storeman)), then install or update Storeman via `pkcon install-local `, and hope that there is no change in the new SailfishOS release which breaks Storeman; if there is, please report that soon at [Storeman's issue tracker](https://github.com/storeman-developers/harbour-storeman/issues). +* Before software can be built for a SailfishOS release at the SailfishOS-OBS, Jolla must create a [corresponding "download on demand (DoD)" OBS-repository](https://build.merproject.org/project/subprojects/sailfishos). It may take a little time after a new SailfishOS release is published before the corresponding "DoD" repository is made available, during which installing Storeman by the Storeman Installer or updating Storeman by itself on a device with the new SailfishOS release already installed does not work, because Storeman cannot be compiled for this new SailfishOS release at the Sailfish-OBS, yet; consequently this is always the case for "closed beta (cBeta)" releases of SailfishOS. In such a situation one has to manually download Storeman built for the last prior SailfishOS "general availability (GA)" release (e.g. </br>
from [its releases section at GitHub](https://github.com/storeman-developers/harbour-storeman/releases) or [the SailfishOS-OBS](https://build.merproject.org/project/show/home:olf:harbour-storeman)), then install or update Storeman via `pkcon install-local `, and hope that there is no change in the new SailfishOS release which breaks Storeman; if there is, please report that soon at [Storeman's issue tracker](https://github.com/storeman-developers/harbour-storeman/issues). * Disclaimer: Storeman and its installer may still have flaws, kill your kittens or break your SailfishOS installation! Although this is very unlikely after years of testing by many users, new flaws may be introduced in any release (as for any software). Mind that the license you implicitly accept by using Storeman or Storeman Installer excludes any liability. ### Installation instructions -* Initial installation without having Storeman or SailfishOS:Chum already installed +* Initial installation without having Storeman already installed 1. Enable "System → Security → Untrusted software → Allow untrusted software" in the SailfishOS Settings app. 2. Download the current Storeman Installer RPM from its ["latest release" page at GitHub](https://github.com/storeman-developers/harbour-storeman-installer/releases/latest) or [OpenRepos.net](https://openrepos.net/content/olf/storeman-installer). 3. Tap on the "File downloaded" notification on your SailfishOS device or select the downloaded RPM file in a file-manager app and choose "Install" in its pulley menu; then confirm the installation. 4. Preferably disable "Allow untrusted software" again. -* Installation via Storeman (i.e., updating from Storeman <  0.2.9) - * If you have [olf's repository at OpenRepos](https://openrepos.net/user/5928/programs) enabled, *Storeman Installer* shall be offered as an update candidate for the outdated *Storeman* installed: Just accept this offer.
Otherwise:
+* Installation via Storeman (i.e. updating from Storeman < 0.2.9) + * If you have [olf's repository at OpenRepos](https://openrepos.net/user/5928/programs) enabled, *Storeman Installer* should be offered as an update candidate for the outdated *Storeman* installed: Just accept this offer.
Otherwise:
1. Search for *Installer*. 2. Select the *Storeman Installer* by *olf*. 3. Enable olf's repository in the top pulley menu. @@ -39,6 +39,6 @@ RPMs of [older Storeman releases are also available at OpenRepos](https://openre * [Storeman Installer 1.3.1](https://github.com/storeman-developers/harbour-storeman-installer/releases/tag/1.3.1) and all later versions are offered as an update candidate for Storeman, if an RPM repository is enabled, which offers the *harbour-storeman-installer* package and Storeman (*harbour-storeman* package) < 0.2.99 is already installed. * Installing [Storeman Installer 1.3.1](https://github.com/storeman-developers/harbour-storeman-installer/releases/tag/1.3.1) and all later versions also automatically removes an installed Storeman (*harbour-storeman* package) < 0.2.99, which eliminates the former necessity to manually remove ("uninstall") an old Storeman. * [Storeman Installer 1.3.8](https://github.com/storeman-developers/harbour-storeman-installer/releases/tag/1.3.8) and all later versions create a persistent log file `/var/log/harbour-storeman-installer.log.txt`. -* Storeman Installer 2 runs "unattended": I.e., without any manual steps, after its installation has been triggered, until Storeman is installed. +* Storeman Installer 2 runs "unattended": I.e. without any manual steps, after its installation has been triggered, until Storeman is installed. * Storeman Installer is slow, because it calls `pkcon` two (releases before v1.3.8) to three times (releases from v[1.3.8](https://github.com/storeman-developers/harbour-storeman-installer/releases/tag/1.3.8) on), which acts quite slowly. The minimal run time for Storeman Installer 2 is about 7 seconds, the typical run time is rather 10 seconds (measured from the moment Storeman Installer's installation is triggered, until Storeman is installed and its icon is displayed at the "launcher"). This is already a lot. Moreover, on rare occasions during heavy testing the PackageKit daemon (`packagekitd`, for which `pkcon` is just a command line front-end, communicating with the daemon via D-Bus) stalled, which can be observed with the crude `pkmon` utility (`Ctrl-C` gets you out.:smiley:); hence Storeman Installer now tries to detect these "hangs" and to counter them: If that happens, its run time can be up to slightly more than 1 minute. In the worst case a stalled PackageKit daemon (and with it its `pkcon` client process(es)) stalls Storeman Installer, until the PackageKit daemon reaches its idle timeout of 300 seconds (5 minutes; this could theoretically happen three times, resulting in a likely unsuccessful run time of more than 15 minutes).</br>
Also note that SailfishOS sometimes fails to show an icon of a freshly installed app on the launcher ("homescreen") until SailfishOS is rebooted (rsp. more precisely: Lipstick is restarted). diff --git a/bin/harbour-storeman-installer b/bin/harbour-storeman-installer index b4ebe71..3212c6b 100644 --- a/bin/harbour-storeman-installer +++ b/bin/harbour-storeman-installer @@ -5,7 +5,7 @@ set -ufC # "-u" (error when using uninitialised variables), # despite minor errors. set -o pipefail # May not be supported, busybox's ash and bash do. export LC_ALL=POSIX # For details see https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap08.html#tag_08_02 -export POSIXLY_CORRECT=1 # Can have side-effects, see e.g., https://github.com/Olf0/sfos-upgrade/issues/73 +export POSIXLY_CORRECT=1 # Can have side-effects, see e.g. https://github.com/Olf0/sfos-upgrade/issues/73 # Ultimately this ensures an almost consistent behaviour across shell implementations. # Specify bash as interpreter of this script (in its first line), as this ensures @@ -28,7 +28,7 @@ export POSIXLY_CORRECT=1 # Can have side-effects, see e.g., https://github.com/ # This script is designed to be called fully detached (by a "double fork" # / a "daemonize") as the last statement of the %posttrans scriptlet -# (the last executed) of an RPM spec file or directly (e.g., in a shell). +# (the last executed) of an RPM spec file or directly (e.g. in a shell). # Memorise how we were called (argument checks are primarily intended for interactive use): mypid="$$" @@ -72,7 +72,7 @@ source /etc/os-release; logentry="[Debug] From /etc/os-release: $ID $VERSION_ID printf '\n%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 7 printf '%s' "$logentry" -ssus="$(ssu s | grep -iv 'UID:\? ')"; logentry='[Debug] `ssu status`, UID omitted:' +ssus="$(ssu s | grep -iv 'UID:\? ')"; logentry='[Debug] \`ssu status\`, UID omitted:' printf '\n%s\n%s\n' "$(date -Iseconds) $logentry" "$ssus" systemd-cat -t "$called" -p 7 printf '%s %s' "$logentry" "$(printf '%s' "$ssus" | sed 's/$/, /g' | tr -d '\n')" @@ -143,7 +143,7 @@ do logentry="[Warning] Failed to refresh harbour-storeman-obs repository, because error-code $retc was returned by: $logentry" printf '\n%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 4 printf '%s' "$logentry" - logentry="[Notice] Trying to terminate (i.e., sending SIGTERM to) all processes named (ID) `pkcon`, then sleeping for $wait seconds." + logentry="[Notice] Trying to terminate (i.e. sending SIGTERM to) all processes named (ID) \`pkcon\`, then sleeping for $wait seconds." printf '%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 5 printf '%s' "$logentry" killall -q -TERM pkcon @@ -156,7 +156,7 @@ do logentry="[Error] Failed to refresh harbour-storeman-obs repository, because error-code $retc was returned by: $logentry" printf '\n%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 3 printf '%s' "$logentry" - logentry="[Warning] Trying to interrupt (i.e., sending SIGINT to) all processes named (ID) `pkcon`, then sleeping for $wait seconds." + logentry="[Warning] Trying to interrupt (i.e. sending SIGINT to) all processes named (ID) \`pkcon\`, then sleeping for $wait seconds." 
printf '%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 4 printf '%s' "$logentry" killall -q -INT pkcon @@ -170,7 +170,7 @@ do logentry="[Error] Failed to refresh harbour-storeman-obs repository, because error-code $retc was returned by: $logentry" printf '\n%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 3 printf '%s' "$logentry" - logentry="[Warning] Trying to hang-up (i.e., sending SIGHUP to) all processes named (ID) `pkcon`, then sleeping for $i seconds and ultimately killing them." + logentry="[Warning] Trying to hang-up (i.e. sending SIGHUP to) all processes named (ID) \`pkcon\`, then sleeping for $i seconds and ultimately killing them." printf '%s\n' "$(date -Iseconds) $logentry" systemd-cat -t "$called" -p 4 printf '%s' "$logentry" killall -q -HUP pkcon @@ -210,7 +210,7 @@ do systemd-cat -t "$3" -p 6 printf '%s' "[Info] Self-removal in 20 seconds: $2";\ eval $2)' sh_rm_inst-storeman "$mypid" "$logentry" "$called" # The first 15 characters of the spawned process' name - # (to be used for, e.g., `ps` and `pgrep` / `pkill`) are: + # (to be used for, e.g. `ps` and `pgrep` / `pkill`) are: # sh_rm_inst-stor fi done @@ -230,7 +230,7 @@ setsid --fork sh -c '(sleep 1;\ systemd-cat -t "$3" -p 6 printf '%s' "[Info] Executing: $2";\ eval $2)' sh_do_inst-storeman "$mypid" "$logentry" "$called" # The first 15 characters of the spawned process' name - # (to be used for, e.g., `ps` and `pgrep` / `pkill`) are: + # (to be used for, e.g. `ps` and `pgrep` / `pkill`) are: # sh_do_inst-stor # Double-fork / "Daemonise" ("SysV-style") in shell code, (ab)using this # interpreter instance for the preamble (setting the environment): diff --git a/cache-docker-images_github-ci.md b/cache-docker-images_github-ci.md index 475b07f..3d6fec4 100644 --- a/cache-docker-images_github-ci.md +++ b/cache-docker-images_github-ci.md @@ -2,48 +2,52 @@ ## Issue description -If a CI configuration (i.e., a "GitHub action") requires a docker image to run, it downloads such images for each CI run. These repeated downloads of docker images, which are often hundreds of megabytes to gigabytes large, significantly slow down each CI run and consume vast amounts of network bandwidth. +If a CI configuration (i.e. a "GitHub workflow") requires a docker image to run, it downloads such images for each CI run. These repeated downloads of docker images, which are often hundreds of megabytes to gigabytes large, significantly slow down each CI run and consume vast amounts of network bandwidth. ### Specific issue Specifically, using the Sailfish-SDK images provided by Coderus for a CI run results in downloading [a docker image between 1 GB and 3,5 GB in size](https://hub.docker.com/r/coderus/sailfishos-platform-sdk/tags) (depending on the SDK / SailfishOS version to build for) up to three times (once for each of the supported architectures: aarch64, armv7hl and i486) from an external "docker registry" (here: [Docker Hub](https://hub.docker.com/)). This affects the [simple variant of using these images](https://github.com/storeman-developers/harbour-storeman-installer/blob/master/.github/workflows/build.yml#L24) (by directly using the [`coderus/github-sfos-build` "action"](https://github.com/CODeRUS/github-sfos-build)) and [the more sophisticated one](https://github.com/sailfishos-patches/patchmanager/blob/master/.github/workflows/build.yml#L34) alike. 
+As these images are downloaded by all users of Coderus' SailfishOS Platform SDK Docker images hosted at Docker Hub (the Docker "registry") and Docker imposes progressively stricter "rate limiting" (i.e. limits for download volume and / or frequency, before access is severely throttled or someone pays for it), this may prevent the use of these images for CI runs in the future. + ## Issue analysis ### Initial assessment -Caching "locally" means, with the measure(s) provided at GitHub, e.g., GitHub "actions". Ultimately all these solutions use [GitHub's `action/cache`](https://github.com/actions/cache), which provides (as of 2023) 10 GB of cache, expiring cached items [LRU based](https://en.wikipedia.org/wiki/Cache_replacement_policies#LRU) or when an item was not accessed for a week. But as some research shows, there are many variants and indirections how to utilise GitHub's `action/cache`. +Caching "locally" means caching with the measure(s) provided at GitHub, e.g. GitHub "actions". Ultimately all these solutions use [GitHub's `action/cache`](https://github.com/actions/cache), which provides (as of 2023) 10 GB of cache, expiring cached items [LRU based](https://en.wikipedia.org/wiki/Cache_replacement_policies#LRU) or when an item was not accessed for a week. But as some research shows, there are many variants and indirections for how to utilise GitHub's `action/cache`. ### Alternative solutions Other "solutions", such as an external caching proxy server, are implicitly not very effective. -Reducing the size of docker images is always a valid approach, has some potential (many docker images carry large amounts of unnecessary cruft), but is time consuming and futile, as the creation and distribution of such images are inviting to a "quick & dirty" approach (i.e., they are much quicker and easier to create and distribute than optimised). +Reducing the size of docker images is always a valid approach, has some potential (many docker images carry large amounts of unnecessary cruft), but is time consuming and futile, as the creation and distribution of such images are inviting to a "quick & dirty" approach (i.e. they are much quicker and easier to create and distribute than optimised). -The only real alternative solution is to host container images "locally" at GitHub, i.e., at [GitHub's container registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry). For an introduction, see GitHub's documentation for [creating, managing and distributing "GitHub packages"](https://docs.github.com/en/packages). +The only real alternative solution is to host container images "locally" at GitHub, i.e. at [GitHub's container registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry). For an introduction, see GitHub's documentation for [creating, managing and distributing "GitHub packages"](https://docs.github.com/en/packages). ### Basic properties of GitHub's `action/cache` * The [`action/cache`](https://github.com/actions/cache) seems to be implicitly run in the context of the user `runner`. While a `sudo su` executed as part of a `run:` statement is effective for subsequent shell commands (tested with the Ubuntu-Linux runner environment provided by GitHub in 2023), I have not found a way to let an "action" run in a different user context. -* The `action/cache` only accepts download targets (i.e., local paths) to be configured as items to cache, not download sources. </br>
+* The `action/cache` only accepts download targets (i.e. local paths) to be configured as items to cache, not download sources. -* These first two properties of GitHub's `action/cache` prevent to simply cache the images downloaded by the local docker instance, usually (in 2023) [in `/var/lib/docker/overlay2/`](https://www.freecodecamp.org/news/where-are-docker-images-stored-docker-container-paths-explained/#docker-images) on Linux (utilising [overlayfs](https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt)), because `/var/lib/docker` and all its sub-directories are assigned to the user and group `root` and provide no access for others. Adding the user `runner` to the group `root` does not help, because this only provides search permission in directories (i.e., the `x` bit is set for directories), but still no access to the files in `/var/lib/docker/[](https://docs.docker.com/storage/storagedriver/overlayfs-driver/)/`. +* These first two properties of GitHub's `action/cache` prevent one from simply caching the images downloaded by the local docker instance, usually (in 2023) [in `/var/lib/docker//`](https://www.freecodecamp.org/news/where-are-docker-images-stored-docker-container-paths-explained/#docker-images) on Linux (utilising [overlayfs](https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt), as [recommended by Docker Inc.](https://docs.docker.com/storage/storagedriver/select-storage-driver/) and preinstalled on Ubuntu 2x.yz), because `/var/lib/docker` and all its sub-directories are assigned to the user and group `root` and provide no access for others. Adding the user `runner` to the group `root` does not help, because this only provides search permission in directories (i.e. the `x` bit is set for directories), but still no access to the files in `/var/lib/docker/[](https://docs.docker.com/storage/storagedriver/overlayfs-driver/)/`. -* The `action/cache` only caches items used in a *successful* CI run. Sometimes it makes sense to always cache items, which are known be independent of the outcome of a CI run, e.g., classic prerequisites for it; exactly what the Sailfish-SDK images constitute for building software for SailfishOS at GitHub. +* The `action/cache` only caches items used in a *successful* CI job run. Sometimes it makes sense to always cache items which are known to be independent of the outcome of a CI run, e.g. classic prerequisites for it; exactly what the Sailfish-SDK images constitute for building software for SailfishOS at GitHub. - Others have also noticed that long ago and trivially patched the original `action/cache` (e.g., [[1]](https://github.com/actions/cache/compare/main...pat-s:always-upload-cache:main#diff-1243c5424efaaa19bd8e813c5e6f6da46316e63761421b3e5f5c8ced9a36e6b6L24-R24), [[2]](https://github.com/actions/cache/compare/master...gerbal:always-cache:master#diff-1243c5424efaaa19bd8e813c5e6f6da46316e63761421b3e5f5c8ced9a36e6b6L21-R21)), but very often this ultimately results in stale forks. Hence [applying this trivial change by "live patching"](https://github.com/mxxk/gh-actions-cache-always) is the only maintainable solution, which resulted in [an improved version of the "live patching" approach](https://github.com/actions/cache/issues/92#issuecomment-1263067512). + Others have also noticed that long ago and trivially patched the original `action/cache` (e.g. </br>
[[1]](https://github.com/actions/cache/compare/main...pat-s:always-upload-cache:main#diff-1243c5424efaaa19bd8e813c5e6f6da46316e63761421b3e5f5c8ced9a36e6b6L24-R24), [[2]](https://github.com/actions/cache/compare/master...gerbal:always-cache:master#diff-1243c5424efaaa19bd8e813c5e6f6da46316e63761421b3e5f5c8ced9a36e6b6L21-R21)), but very often this ultimately results in stale forks. Hence [applying this trivial change by "live patching"](https://github.com/mxxk/gh-actions-cache-always) is the only maintainable solution, which resulted in [an improved version of the "live patching" approach](https://github.com/actions/cache/issues/92#issuecomment-1263067512). ~~Unfortunately~~ GitHub has ~~not~~ provided a way to adjust this behaviour by a CI configuration, ~~despite~~ \[see\] [issue \#92](https://github.com/actions/cache/issues/92) (and subsequent issues [\#165](https://github.com/actions/cache/issues/165), [\#334](https://github.com/actions/cache/issues/334) etc.) has been filed for GitHub's `action/cache` long ago.
*Edit:* [Mostly solved](https://github.com/actions/cache/discussions/1020) by the initial release of `actions/cache/save` and `actions/cache/restore` in December 2022; although [this extension of the original `action/cache`](https://github.com/MartijnHols/actions-cache) still provides a larger feature set and is structurally analogous to GitHub's new `actions/cache/save` and `actions/cache/restore`. This is [now the recommended way of storing items in a cache](https://github.com/actions/cache/tree/main/save#always-save-cache), regardless of whether the whole action succeeds or fails; still, "live patching" GitHub's original `action/cache` to also cache when the job fails has some appeal due to the simpler usage of `action/cache` compared to the new `action/cache/save` and `action/cache/restore`, all three of which are now, and will continue to be, maintained by GitHub. As their basic properties are the same (except for this point), the remainder of this document can stay unchanged. - Plan: Enhance and release [a "live patching" action, which downloads (actually: checks-out), patches and transparently maps to the locally patched version of the original `action/cache`](https://github.com/Olf0/always-cache), ultimately also to the GitHub Marketplace. + Plan: Enhance and release [a "live patching" action, which downloads (actually: checks-out), patches and transparently maps to the locally patched version of the original `action/cache`](https://github.com/Olf0/cache-always), ultimately also to the GitHub Marketplace. ## Exploring the solution space ### Pre-download the container images -The most trivial way to cope with `action/cache`'s access limitations is to pre-download images expicitly. For this one creates a download directory by issuing `mkdir -p $GITHUB_WORKSPACE/` (the `-p` is only used to prevent an error, when the dirctory already exists; `$GITHUB_WORKSPACE` resolves to `/home/runner//` on Linux (yes, twice ``), GitHub calls this location "runner workspace", it is naturally also the initial PWD), download the image by some third party tool (the docker CLI commands do not allow for setting the download location), then execute a [`docker image load`](https://docs.docker.com/engine/reference/commandline/image_load/) (or [`docker image import`](https://docs.docker.com/engine/reference/commandline/image_import/)) and ultimately continue as before (e.g., instanciating and starting a docker container by [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)). +The most trivial way to cope with `action/cache`'s access limitations is to pre-download images explicitly. For this one creates a download directory by issuing `mkdir -p $GITHUB_WORKSPACE/` (the `-p` is only used to prevent an error, when the directory already exists; `$GITHUB_WORKSPACE` resolves to `/home/runner//` on Linux (yes, twice ``), GitHub calls this location "runner workspace", it is naturally also the initial PWD), download the image by some third-party tool (the docker CLI commands do not allow for setting the download location), then execute a [`docker image load`](https://docs.docker.com/engine/reference/commandline/image_load/) (or [`docker image import`](https://docs.docker.com/engine/reference/commandline/image_import/)) and ultimately continue as before (e.g. instantiating and starting a docker container by [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)). + +Unfortunately this approach does not work for large images (e.g. </br>
> 1 GB) due to space constraints GitHub imposes for the runner home directory. I have not followed the idea of alleviating this by raising the quota, because that requires analysis (is it imposed by a classic `quota` and can it be raised by `sudo`ing?) and might be seen by GitHub as circumventing their constraints. Mind that the git repository is also checked out to the "runner workspace" (`$GITHUB_WORKSPACE`) as root directory, so do pay attention to not clobber any files or directories of your source repository. @@ -54,7 +58,7 @@ Mind that the git repository is also checked out to the "runner workspace" (`$GI * Created and maintained as [a by-product](https://github.com/moby/moby/tree/v23.0.0-rc.1/contrib#readme) of a [lively project](https://github.com/moby/moby/pulse). * Provides [tagged, stable releases](https://github.com/moby/moby/releases), e.g. (latest as of 2023-01-07), [v20.10.22](https://github.com/moby/moby/blob/v20.10.22/contrib/download-frozen-image-v2.sh). * Is a simple and small shell-script (< 400 sloc, ~ 13 KBytes), which implicitly documents [how to call it](https://github.com/moby/moby/blob/v23.0.0-rc.1/contrib/download-frozen-image-v2.sh#L18-L22) and [how to utilise it](https://github.com/moby/moby/blob/v23.0.0-rc.1/contrib/download-frozen-image-v2.sh#L429-L431). -* My favorite third-party tool for this approach. +* My favorite "external" tool for this approach. #### ● [Skopeo](https://github.com/containers/skopeo#readme) by the ["Containers" project](https://github.com/containers) * Its source code is [hosted at GitHub](https://github.com/containers/skopeo) and uses the Apache-2.0 license. @@ -116,35 +120,41 @@ Mind that the git repository is also checked out to the "runner workspace" (`$GI * Its source code is [hosted at GitHub](https://github.com/ktmud/cached-dependencies) and uses the MIT license. * Does provide a single git tag. * Written in TypeScript (Microsoft's superset of JavaScript). -* Smallish, < 100 KBytes. +* Smallish, ca. 275 KiB comprising compiled JavaScript ([three files](https://github.com/ktmud/cached-dependencies/tree/master/dist)), a [bash script](https://github.com/ktmud/cached-dependencies/blob/master/src/scripts/bashlib.sh) and an [action.yaml](https://github.com/ktmud/cached-dependencies/blob/master/action.yml). * Appears to be unmaintained. * Appears to be a generic caching solution for pulling external dependencies. * States to be adaptable, includes cache configurations for `pip`, `npm` and `yarn`. * Despite [extensive documentation](https://github.com/ktmud/cached-dependencies#readme), I fail to quickly comprehend: * How to configure a different source (Docker Hub). * If it is also limited to downloads in the runner's "workspace". -* Pulled from the "GitHub marketplace" (yesterday it was still there and is still [found via the search](https://github.com/marketplace?type=actions&query=cached-+))? See https://github.com/marketplace/actions/cached-dependencies - +* Pulled (?) from the "GitHub marketplace" 2023-01-08, see [github.com/marketplace/actions/cached-dependencies](https://github.com/marketplace/actions/cached-dependencies). 2023-01-07 it was still there and is still [found via the search](https://github.com/marketplace?type=actions&query=cached-+)!?! + #### ● [Docker Cache](https://github.com/marketplace/actions/docker-cache) by [ScribeMD](https://github.com/ScribeMD) * Its source code is [hosted at GitHub](https://github.com/ScribeMD/docker-cache) and uses the MIT license. * Does provide stable releases and git tags (lots!). </br>
-* Written in TypeScript (Microsoft's superset of JavaScript). -* Smallish, < 100 KBytes. +* Comprises a [few TypeScript scripts](https://github.com/ScribeMD/docker-cache/tree/main/src) (Microsoft's superset of JavaScript), which are compiled into two JavaScript scripts ([main/index.js](https://github.com/ScribeMD/docker-cache/blob/main/dist/main/index.js) and [post/index.js](https://github.com/ScribeMD/docker-cache/blob/main/dist/post/index.js)), each 1.17 MiB in size (!), plus a tiny [action.yaml](https://github.com/ScribeMD/docker-cache/blob/main/action.yaml) file which calls these. * Appears to be well maintained. * Appears to be a generic caching solution for Docker images. * Explicitly denotes the use case "pull images from Docker Hub"! +* Works technically fine, but uses `docker save --output ~/.docker-images.tar`, which results in `write /home/runner/.docker_temp_XYZ: no space left on device` even with the smallest SailfishOS Platform SDK images by Coderus (ca. 1 GB, but these pull in a few additional layers). #### ● [Rootless Docker](https://github.com/marketplace/actions/rootless-docker) also by [ScribeMD](https://github.com/ScribeMD) * Its source code is [hosted at GitHub](https://github.com/ScribeMD/rootless-docker) and uses the MIT license. * Does provide stable releases and git tags (lots!). -* Seem to be primarily written in Python with some JavaScript / TypeScript (Microsoft's superset of JavaScript). -* Smallish, < 100 KBytes. +* A small, well readable [action.yaml](https://github.com/ScribeMD/rootless-docker/blob/main/action.yaml) file. +* Tiny: 2.65 KBytes. +* But it directly [downloads and executes](https://github.com/ScribeMD/rootless-docker/blob/main/action.yaml#L48-L55) the [`https://get.docker.com/rootless` shell script](https://get.docker.com/rootless) (some 10 KBytes), which in turn [downloads and unpacks (i.e. "installs") TAR archives of the required Docker components](https://get.docker.com/rootless) (some MBytes). * Appears to be well maintained. * States to provide a set of advantages over running docker conventionally in root mode. -* Renders any specific caching moot, as GitHub's `action/cache` suffices. +* Renders any specific caching moot, as GitHub's `action/cache` should suffice. +* But I have not yet determined in which directories pulled images / layers are stored (Rootless Docker's default is `~/.local/share/docker`, likely in the subdirectory ``); i.e. those which are to be cached by GitHub's `action/cache`. ## Down-selection of possible solutions to try -1. [Rootless Docker](https://github.com/marketplace/actions/rootless-docker): https://github.com/ScribeMD/rootless-docker -2. [Docker Cache](https://github.com/marketplace/actions/docker-cache): https://github.com/ScribeMD/docker-cache +0. ~~Use Podman instead; it is preinstalled on GitHub's Ubuntu 22.04 runner image, too.~~</br>
When started by a non-root user, it uses `$HOME/.local/share/containers/storage/` to store images, layers and their metadata, specifically the subdirectory `-layers` for the downloaded layers. This [configuration can easily be adapted](https://github.com/containers/podman/issues/1916#issuecomment-1219466711). But not all files are necessarily readable by the user, despite being their owner, because they have no permissions set (e.g. an `/etc/shadow` in a container image). Consequently the GitHub Actions `cache` and `cache/save` fail. +1. ~~[Rootless Docker](https://github.com/marketplace/actions/rootless-docker): https://github.com/ScribeMD/rootless-docker~~</br>
+ Very likely it exposes the same issue as rootless Podman, which is described in the prior point. +2. ~~[Docker Cache](https://github.com/marketplace/actions/docker-cache): https://github.com/ScribeMD/docker-cache~~
Easily runs out of space on a GitHub runner, see details [in its section](#-docker-cache-by-scribemd). 3. [`download-frozen-image-v2.sh`](https://github.com/moby/moby/blob/master/contrib/download-frozen-image-v2.sh): https://github.com/moby/moby/tree/master/contrib#readme diff --git a/double-fork-in-shell.md b/double-fork-in-shell.md index 718b1da..9c07d21 100644 --- a/double-fork-in-shell.md +++ b/double-fork-in-shell.md @@ -24,28 +24,28 @@ Inherits umask, PWD and file descriptors from caller ## Variations - One can set umask and PWD (via `cd`) as it fits best: The exemplary values in the "generic form" are just often used ones; I usually set the umask more restrictively.</br>
- Mind that the directory to change to must exist (you do not want a `cd` at this point to fail), hence `/` is a safe value, as e.g., `/tmp` is not available early in the boot-phase (not relevant for actions triggered by a regular user). + Mind that the directory to change to must exist (you do not want a `cd` at this point to fail), hence `/` is a safe value, as e.g. `/tmp` is not available early in the boot-phase (not relevant for actions triggered by a regular user). - One sure can redirect from or to anywhere else than `/dev/null` or redirect StdIN and StdERR differently.
Consider where you want the output in error cases to go (both, StdOUT and StdERR) to be visible for debugging, too. -- If one only calls an own shell-script `sh -c '()'`, one could consider to pull checking / setting the umask and PWD early inside that script and also take care of input- and output-redirections there; do not, because if the initial values are dynamic (i.e., variable, hence unknown at the time the code is written) or may become invalid after the double-fork (e.g., the caller subsequently deletes the directory, which was PWD at call time) you shall set them as early as possible. See also [the first points of the section "Notes"](#notes). -- With a POSIX-compliant shell, one can close any file descriptor with "&-" (e.g., for StdIN `<&-`, for StdOUT `>&-` and for StdERR `2>&-`), instead of redirecting it from or to `/dev/null`.
- Also note that when closing StdOUT or StdERR, anything writing to a closed file descriptor will (/ might / should / must? POSIX might tell.) fail, just as reading from a closed StdIN, in contrast to redirections from or to `/dev/null`. This is fine if one ensures that the commands executed do not use any closed file descriptors, e.g., by redirecting them individually for single commands or commands grouped by `{ ; }`. +- If one only calls an own shell-script `sh -c '()'`, one could consider to pull checking / setting the umask and PWD early inside that script and also take care of input- and output-redirections there; do not, because if the initial values are dynamic (i.e. variable, hence unknown at the time the code is written) or may become invalid after the double-fork (e.g. the caller subsequently deletes the directory, which was PWD at call time) you shall set them as early as possible. See also [the first points of the section "Notes"](#notes). +- With a POSIX-compliant shell, one can close any file descriptor with "&-" (e.g. for StdIN `<&-`, for StdOUT `>&-` and for StdERR `2>&-`), instead of redirecting it from or to `/dev/null`.
+ Also note that when closing StdOUT or StdERR, anything writing to a closed file descriptor will (/ might / should / must? POSIX might tell.) fail, just as reading from a closed StdIN, in contrast to redirections from or to `/dev/null`. This is fine if one ensures that the commands executed do not use any closed file descriptors, e.g. by redirecting them individually for single commands or commands grouped by `{ ; }`. ## Notes - It is strongly recommended to explicitly set umask, PWD, StdIN, StdOUT and StdERR to known good values (or ensure that they are already sane) at the top level, because they are part of the environment, as depicted in the "generic form" above. -- The environment of a caller is always copied stepwise further down the call chain and can be altered at each step; additionally some shells (e.g., busybox's ash) also copy their regular variables (the ones not exported to the environment) down to sub-shells opened with `(…)`, but this is implementation dependent, hence nothing to rely on.
+- The environment of a caller is always copied stepwise further down the call chain and can be altered at each step; additionally some shells (e.g. busybox's ash) also copy their regular variables (the ones not exported to the environment) down to sub-shells opened with `(…)`, but this is implementation dependent, hence nothing to rely on.
Thus one can use environment variables to pass values down the call chain, though this is not elegant and clutters the environment, hence … - The best way to carry parameters across this sequence down the call chain are positional parameters, as described in the man-page of the shell of your choice for the option `-c` and depicted in [the examples above](#double-fork-daemonize-in-shell-code). -- The innermost sub-shell call (i.e., the one "inside" the `sh -c '…'`) is only deemed necessary on System V descendent UNIXes, in order to execute the \ in a shell, which is not a session leader (see section "[Background](background)" below). Hence on BSD-UNIXes one may omit the innermost `(…)` and use `… setsid --fork sh -c ' []' [[arg0] [] []] []; …` (or for a single command: `… setsid --fork [] [] []; …`) instead. Well, Linux is a System V UNIX by design, but all code is newly and independently written, in contrast to, e.g., HP-UX, IRIX, Sinix etc. (Solaris 2?), which contain code from the original System V Release 4 ("SVR4"). I have not researched much, which property / flaw of a "System V descendent UNIX" this addresses and how real the dangers are, because once the scheme of double-forking in shell code is understood, it is easy to ensure not to be a session leader by opening a sub-shell via `(…)`. +- The innermost sub-shell call (i.e. the one "inside" the `sh -c '…'`) is only deemed necessary on System V descendent UNIXes, in order to execute the \ in a shell, which is not a session leader (see section "[Background](background)" below). Hence on BSD-UNIXes one may omit the innermost `(…)` and use `… setsid --fork sh -c ' []' [[arg0] [] []] []; …` (or for a single command: `… setsid --fork [] [] []; …`) instead. Well, Linux is a System V UNIX by design, but all code is newly and independently written, in contrast to, e.g. HP-UX, IRIX, Sinix etc. (Solaris 2?), which contain code from the original System V Release 4 ("SVR4"). I have not researched much, which property / flaw of a "System V descendent UNIX" this addresses and how real the dangers are, because once the scheme of double-forking in shell code is understood, it is easy to ensure not to be a session leader by opening a sub-shell via `(…)`. - For terminating a (sub-)shell via SIGTERM, this shell must not be an interactive shell (POSIX requires interactive shell instances to ignore a SIGTERM: Open a shell, type `kill $$`, see that nothing happens, then `kill -HUP $$` to close it).
- Thus for experimenting / testing in an interactive shell, one might wrap the whole statement (e.g., in the form of one of the examples above) in a(nother) `sh -c '…' &` (i.e., detaching it via `&`) to mimic an indepentently running shell which executes your test and target this shell instance with a `kill $!` when your experiment does not terminate on its own. -- The caller of the ["Terminating the caller" example](#terminating-the-caller) is terminated immediately after calling it. There are some other ways to ensure that execution of the original caller ends, e.g., see [the subsequent examples above](#waiting-for-the-caller-to-finish-waits-endlessly-if-it-does-not), plus "[Larger variations](#larger-variations)", below. + Thus for experimenting / testing in an interactive shell, one might wrap the whole statement (e.g. in the form of one of the examples above) in a(nother) `sh -c '…' &` (i.e. detaching it via `&`) to mimic an independently running shell which executes your test and target this shell instance with a `kill $!` when your experiment does not terminate on its own. +- The caller of the ["Terminating the caller" example](#terminating-the-caller) is terminated immediately after calling it. There are some other ways to ensure that execution of the original caller ends, e.g. see [the subsequent examples above](#waiting-for-the-caller-to-finish-waits-endlessly-if-it-does-not), plus "[Larger variations](#larger-variations)", below. - The statement must be started detached (`setsid` does that implicitly, if *not* using the option `-w`: Thus no `&` shall be appended), otherwise the caller would be stopped during the execution of the statement, hence the caller would not be able to receive and handle a SIGTERM sent to it by `kill`. -- While explicitly specifying to run the \ in a sub-shell (by `(…)`) inside the `sh -c '…'` may be superfluous in some special cases, do not omit it: Even though IMHO all shells start a single detached command (i.e., ` [] [] &`) automatically in a sub-shell, many execute all external commands in a sub-shell etc., there might be cases in which this assumption will fall short.</br>
+- While explicitly specifying to run the \ in a sub-shell (by `(…)`) inside the `sh -c '…'` may be superfluous in some special cases, do not omit it: Even though IMHO all shells start a single detached command (i.e. ` [] [] &`) automatically in a sub-shell, many execute all external commands in a sub-shell etc., there might be cases in which this assumption will fall short.
Plus, expressing the execution in a sub-shell explicitly does no harm, improves readability (one does not have to know / think about the specific shell's automatisms) and enhances portability (some of this behaviour depends on the implementation). -- Double-forking / daemonising (with or without the "anti-session-leader extra-fork" for SysV-UNIXes) is impossible to achieve via `nohup` or `disown` AFAICS (e.g., trying forms as `umask 0123; cd /tmp; nohup [] [] &` or `umask 7777; cd /; () [] & disown [-h] [$!]`), because with both forms the callee will stay in the same session as the caller, with the sessions's TTY being assigned to the callee. +- Double-forking / daemonising (with or without the "anti-session-leader extra-fork" for SysV-UNIXes) is impossible to achieve via `nohup` or `disown` AFAICS (e.g. trying forms as `umask 0123; cd /tmp; nohup [] [] &` or `umask 7777; cd /; () [] & disown [-h] [$!]`), because with both forms the callee will stay in the same session as the caller, with the sessions's TTY being assigned to the callee. - If you want to check, if the process hierarchy looks as it should (see section "[Background](#background)" below), the tree-view of GNU-`ps` (option `--forest`) or `pstree` are quite useful to see the hierarchical aspects much easier, but obscure other important aspects to observe: the SessionID and TTY.
- Observing the dynamics of these actions can be best achieved by instrumenting the calling and called script with `ps -o stat,tty,user,group,pgid,sid,ppid,pid,comm,args` (field names from busybox's `ps`, other `ps`-implementations may call the SessionID-field `ssid`) at appropiate locations, plus some, but not too tight filtering (e.g., `ps -eo … | grep …` etc.), and output redirection to a file (e.g., `>> mylog.txt`). See [this branch for an example](https://github.com/storeman-developers/harbour-storeman-installer/tree/2.0.44), the minimalistic `ps` call options are used in order to work with busybox's `ps` implementation (which is not fully POSIX compliant). + Observing the dynamics of these actions can be best achieved by instrumenting the calling and called script with `ps -o stat,tty,user,group,pgid,sid,ppid,pid,comm,args` (field names from busybox's `ps`, other `ps`-implementations may call the SessionID-field `ssid`) at appropriate locations, plus some, but not too tight filtering (e.g. `ps -eo … | grep …` etc.), and output redirection to a file (e.g. `>> mylog.txt`). See [this branch for an example](https://github.com/storeman-developers/harbour-storeman-installer/tree/2.0.44); the minimalistic `ps` call options are used in order to work with busybox's `ps` implementation (which is not fully POSIX compliant). ## Larger variations - Because one usually starts the whole double-fork to the innermost \ from a shell (respectively, a script interpreted by one), one may omit the outer sub-shell call via `(…)` (as depicted in the "[minimalistic form](#minimalistic-form)", above) and use the shell from which the double fork is called to set umask and PWD there; the outer sub-shell call was solely employed for not altering the caller's environment. @@ -82,14 +82,14 @@ Inherits umask, PWD and file descriptors from caller ## Motivation #### My issue: Trigger installing an RPM from a scriptlet of another one -I had to trigger the installation of an RPM package from within an spec file scriptlet of an "installer" RPM package, for which the `%posttrans` scriptlet is the natural place, as it is run last. Side note: In general this should not be necessary, my initial reaction was "this is conceptually wrong and shall be handled by proper dependency definitions", but it turned out to be a special case with restrictions which do create a necessity for this; other workarounds would be much harder to deploy for an average user. Here, the "installer" package has to enable an appropriate, dynamically determined repository (dependent on CPU-architecture, installed OS release etc.) in order to access the correct main package; unless this repository is enabled (i.e., as long the installation of the "installer" package has not finished), resolving dependencies for a main package cannot be performed, because the (/ any) specific main package is not yet accessible. +I had to trigger the installation of an RPM package from within a spec file scriptlet of an "installer" RPM package, for which the `%posttrans` scriptlet is the natural place, as it is run last. Side note: In general this should not be necessary, my initial reaction was "this is conceptually wrong and shall be handled by proper dependency definitions", but it turned out to be a special case with restrictions which do create a necessity for this; other workarounds would be much harder to deploy for an average user. </br>
Here, the "installer" package has to enable an appropriate, dynamically determined repository (dependent on CPU-architecture, installed OS release etc.) in order to access the correct main package; unless this repository is enabled (i.e. as long as the installation of the "installer" package has not finished), resolving dependencies for a main package cannot be performed, because the (/ any) specific main package is not yet accessible. I know that other people have solved this by utilising `cron` or `systemd`, but after having successfully [implemented this via an indirection by a systemd unit](https://github.com/storeman-developers/harbour-storeman-installer/tree/2.0.31/), I realised: -- The indirection via `cron` or `systemd` achieves that the started process is fully detached from the caller: I.e., that the caller is not an ancestor (parent, grandparent etc.) of the callee, plus that it is run within a different session (see `ps`'s field `sid` for the SessionID) and hence does not share a TTY with the caller, any longer. -- One does not want any time-based waiting, because no one can tell how long the initial "installer" package installation will take on a non-deterministic software stack (i.e., not a real-time system); imagine a machine is heavily swapping and hence (almost) grinding to a halt. Thus timer units or cron jobs are not suitable to implement this robustly. +- The indirection via `cron` or `systemd` achieves that the started process is fully detached from the caller: I.e. that the caller is not an ancestor (parent, grandparent etc.) of the callee, plus that it is run within a different session (see `ps`'s field `sid` for the SessionID) and hence does not share a TTY with the caller, any longer. +- One does not want any time-based waiting, because no one can tell how long the initial "installer" package installation will take on a non-deterministic software stack (i.e. not a real-time system); imagine a machine is heavily swapping and hence (almost) grinding to a halt. Thus timer units or cron jobs are not suitable to implement this robustly. - Consequently one has to transmit the PID of the `%posttrans` scriptlet interpreter (usually `bash`) to the fully detached process, when it is instantiated, so it can wait for the `%posttrans` interpreter to finish execution of the scriptlet. Systemd allows for a single parameter to be transmitted to "instantiated units", but the wait function (a `while` or `until` loop) has to be implemented in an external script called by an `ExecStartPre=` statement (or pack the whole wait function awkwardly in an `sh -c '…'`), because systemd does not allow for loops or any other kind of programme flow control. - That was the moment I realised that a single, own shell script is more elegant and provides one with many more degrees of freedom than being limited to systemd's unit syntax. The only open design question was then how to become fully detached from the caller. I remembered the concept of double-forking / "daemonizing" for UNIX daemons, which were once usually written in C, to fully detach a process from its caller. </br>
-- The final twist for a robust implementation was [to trigger the installation of the main package *also* in a fully detached manner by double-forking, then waiting for the grandparent to finish (i.e., the installer script)](https://github.com/storeman-developers/harbour-storeman-installer/blob/2.1.6/bin/harbour-storeman-installer#L207-L223), because the main package automatically triggers the removal of the "installer" package (including its "installer" script) by a `Conflicts:` dependency on it. This way the main package can be kept free of any special measures WRT the two stepped installation procedure (except for the single `Conflicts: ` statement) and thus can still be directly installed after manually enabling the correct repository or downloading a suitable RPM package. +- The final twist for a robust implementation was [to trigger the installation of the main package *also* in a fully detached manner by double-forking, then waiting for the grandparent to finish (i.e. the installer script)](https://github.com/storeman-developers/harbour-storeman-installer/blob/2.1.6/bin/harbour-storeman-installer#L207-L223), because the main package automatically triggers the removal of the "installer" package (including its "installer" script) by a `Conflicts:` dependency on it. This way the main package can be kept free of any special measures WRT the two stepped installation procedure (except for the single `Conflicts: ` statement) and thus can still be directly installed after manually enabling the correct repository or downloading a suitable RPM package. #### General information about various aspects of double forking / daemonising Hence I started searching the WWW for how to perform a double fork / daemonise in shell code, without finding anything really useful for UNIX shells, but really good explanations and examples in C, Python, Ruby etc.: @@ -106,7 +106,7 @@ Hence I started searching the WWW for how to perform a double fork / daemonise i - https://news.ycombinator.com/item?id=8355376
This discussion confirmed my belief at that point, that (double-)forking is the only way to go. -I wonder, why I have not found any proper and complete example for shell code, because I believe that people must have written UNIX daemons in shell code and also started them this way (e.g., per classic sysv-init), but maybe this was done in the 70s, 80s and 90s when no forums as Stackexchange, Stackoverflow or GitHub-gists etc. existed.
+I wonder why I have not found any proper and complete example for shell code, because I believe that people must have written UNIX daemons in shell code and also started them this way (e.g. per classic sysv-init), but maybe this was done in the 70s, 80s and 90s when no forums such as Stackexchange, Stackoverflow or GitHub-gists etc. existed.</br>
So I ended up researching, implementing, testing and documenting this for myself and everybody else.
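To make the double-fork scheme documented in double-fork-in-shell.md concrete at a glance, here is a minimal sketch in POSIX shell code, modelled on the invocations quoted from this repository above; the payload command `my_payload`, the process name `sh_my_payload` and the log-file path are purely illustrative placeholders, and `setsid --fork` is the util-linux implementation used throughout this repository:

```sh
#!/bin/sh
# Minimal double-fork / "daemonise" sketch following the pattern described
# above: set umask, PWD and the standard file descriptors explicitly, detach
# via `setsid --fork`, and run the payload in an extra sub-shell `(…)` so it
# is not a session leader. `my_payload` and the log path are hypothetical.
umask 022
cd /
setsid --fork sh -c '(
    # Values are passed down the call chain as positional parameters:
    # $0 is the spawned process name, $1 the caller PID, $2 the log file.
    my_payload "$1" "$2"
  )' sh_my_payload "$$" /var/log/my_payload.log < /dev/null > /dev/null 2>&1
# The caller continues immediately; the payload now runs in its own session,
# with no controlling TTY and without the caller among its ancestors.
```

Afterwards the caller may simply exit; because of the double fork, the spawned process is re-parented (typically to PID 1) instead of being terminated together with its caller.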
diff --git a/rpm/harbour-storeman-installer.rpmlintrc b/rpm/harbour-storeman-installer.rpmlintrc index a2b7bbf..f26e020 100644 --- a/rpm/harbour-storeman-installer.rpmlintrc +++ b/rpm/harbour-storeman-installer.rpmlintrc @@ -12,7 +12,7 @@ addFilter('invalid-license') # - It extracts strange changelog entries out of Git, if a %%changelog section is used addFilter('incoherent-version-in-changelog') # - It sometimes re-writes the %version-%release strings of package names, -# when referencing (only) a branch (i.e., not a git tag), for example, +# when referencing (only) a branch (i.e. not a git tag), for example, # 0.5.2-1 to 0.5.2+main.20230129011931.1.g584263a-1.8.1.jolla addFilter('filename-too-long-for-joliet') diff --git a/rpm/harbour-storeman-installer.spec b/rpm/harbour-storeman-installer.spec index 414513e..c209173 100644 --- a/rpm/harbour-storeman-installer.spec +++ b/rpm/harbour-storeman-installer.spec @@ -4,18 +4,18 @@ Name: harbour-storeman-installer # The Git tag format must adhere to / since 2023-05-18. # The tag must adhere to semantic versioning, for details see # https://semver.org/ -Version: 2.2.4 +Version: 2.2.5 # The tag comprises one of {alpha,beta,rc,release} postfixed with a -# natural number greater or equal to 1 (e.g., "beta3") and may additionally be +# natural number greater or equal to 1 (e.g. "beta3") and may additionally be # postfixed with a plus character ("+"), the name of the packager and a release -# number chosen by her (e.g., "rc2+jane4"). `{alpha|beta|rc|release}` +# number chosen by her (e.g. "rc2+jane4"). `{alpha|beta|rc|release}` # indicates the expected status of the software. No other identifiers shall be # used for any published version, but for the purpose of testing infrastructure # other nonsensual identifiers as `adud` may be used, which do *not* trigger a # build at GitHub and OBS, when configured accordingly; mind the sorting # (`adud` < `alpha`). For details and reasons, see # https://github.com/Olf0/sfos-upgrade/wiki/Git-tag-format -Release: release5 +Release: release6 # The Group tag should comprise one of the groups listed here: # https://github.com/mer-tools/spectacle/blob/master/data/GROUPS Group: Software Management/Package Manager @@ -44,7 +44,7 @@ Requires(posttrans): PackageKit # ToDo: Check if the GNU-versions of these packages (named as alternatives below) # also provide the aliases ("virtual packages") denoted here, then these can be # used; ultimately most of these packages shall be already installed, anyway. -# 1. `coreutils` (for e.g., `touch` and many other very basic UNIX tools): +# 1. `coreutils` (for e.g. `touch` and many other very basic UNIX tools): # Requires: (busybox-symlinks-coreutils or gnu-coreutils) Requires: coreutils # Requires(post,posttrans): (busybox-symlinks-coreutils or gnu-coreutils) @@ -125,7 +125,7 @@ cp bin/%{name} %{buildroot}%{_bindir}/ %post # The %%post scriptlet is deliberately run when installing and updating. -# Create a persistent log file, i.e., which is not managed by RPM and hence +# Create a persistent log file, i.e. which is not managed by RPM and hence # is unaffected by removing the %%{name} RPM package: if [ ! -e %{logfile} ] then @@ -183,7 +183,7 @@ umask 113 # because the scriptlets are executed with PWD safely set to /. setsid --fork sh -c '(%{_bindir}/%{name} "$1" "$2")' sh_call_inst-storeman "$$" "%{logfile}" >> "%{logfile}" 2>&1 <&- # The first 15 characters of the spawned process' name -# (to be used for, e.g., `ps` and `pgrep` / `pkill`) are: +# (to be used for, e.g. 
`ps` and `pgrep` / `pkill`) are: # sh_call_inst-st exit 0
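For reference, a quick way to verify that the helper processes spawned by these scriptlets are fully detached, following the `ps`-based instrumentation suggested in double-fork-in-shell.md above (a sketch; the process-name prefixes `sh_call_inst-st`, `sh_do_inst-stor` and `sh_rm_inst-stor` are the ones documented in this diff):

```sh
# Run in a second terminal while the installer is active: each helper should
# show its own session ID (sid), no controlling TTY and, once its spawning
# shell has exited, PPID 1. The bracket trick keeps grep from matching itself.
ps -eo stat,tty,pgid,sid,ppid,pid,comm,args | grep '[s]h_.*_inst-st'
```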