Diffstat (limited to 'src/cpt-lib.in')
-rw-r--r--  src/cpt-lib.in  1302
1 file changed, 852 insertions(+), 450 deletions(-)
diff --git a/src/cpt-lib.in b/src/cpt-lib.in
index bf58fbe..1896920 100644
--- a/src/cpt-lib.in
+++ b/src/cpt-lib.in
@@ -9,7 +9,9 @@
# Currently maintained by Cem Keylan.
version() {
- log "Carbs Packaging Tools" @VERSION@
+ out "Carbs Packaging Tools, version $cpt_version" \
+ @LICENSE@
+
exit 0
}
@@ -23,11 +25,40 @@ log() {
#
# All messages are printed to stderr to allow the user to hide build
# output which is the only thing printed to stdout.
- #
- # '${3:-->}': If the 3rd argument is missing, set prefix to '->'.
- # '${2:+colorb}': If the 2nd argument exists, set text style of '$1'.
- printf '%b%s %b%b%s%b %s\n' \
- "$colory" "${3:-->}" "$colre" "${2:+$colorb}" "$1" "$colre" "$2" >&2
+ case $# in
+ 1) printf '%b->%b %s\n' "$colory" "$colre" "$1" ;;
+ 2) printf '%b->%b %b%s%b %s\n' "$colory" "$colre" "$colorb" "$1" "$colre" "$2" ;;
+ 3) printf '%b%s%b %b%s%b %s\n' "$colory" "${3:-->}" "$colre" "$colorb" "$1" "$colre" "$2" ;;
+ *) return 1
+ esac >&2
+}
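
As a rough illustration of the three argument forms (assuming colors are
disabled, so $colory/$colorb/$colre expand to nothing; everything goes to
stderr):

    log "Starting build"               # prints: -> Starting build
    log vim "Building package"         # prints: -> vim Building package
    log vim "Checksum mismatch" "!>"   # prints: !> vim Checksum mismatch
    log a b c d                        # returns 1 (too many arguments)
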
+
+warn() {
+ # Print a warning message
+ log "$1" "$2" "${3:-WARNING}"
+}
+
+outv() {
+ # Call `out()` when CPT_VERBOSE is set.
+ [ "$CPT_VERBOSE" = 1 ] || return 0
+ out "$@"
+}
+
+logv() {
+ # Call `log()` when CPT_VERBOSE is set.
+ [ "$CPT_VERBOSE" = 1 ] || return 0
+ log "$@"
+}
+
+warnv() {
+ # Call `warn()` when CPT_VERBOSE is set.
+ [ "$CPT_VERBOSE" = 1 ] || return 0
+ warn "$@"
+}
+
+execv() {
+ # Redirect the output to /dev/null unless CPT_VERBOSE is set.
+ if [ "$CPT_VERBOSE" = 1 ]; then "$@"; else "$@" >/dev/null 2>&1; fi
}
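
A minimal sketch of how the verbose helpers behave; 'make' stands in for any
command whose output should be hidden by default:

    CPT_VERBOSE=1
    logv vim "Running build hook"   # printed, since CPT_VERBOSE is 1
    execv make                      # output passes through untouched

    CPT_VERBOSE=0
    logv vim "Running build hook"   # silently returns 0
    execv make                      # stdout and stderr go to /dev/null
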
die() {
@@ -36,6 +67,124 @@ die() {
exit 1
}
+colors_enabled() {
+ case ${CPT_COLOR:=auto} in
+ auto) [ -t 1 ] ;;
+ 1|always) return 0 ;;
+ 0|never) return 1 ;;
+ *) die "Unknown color value: '$CPT_COLOR'"
+ esac
+}
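
Usage follows the conventional auto/always/never pattern; for example
(hypothetical invocations):

    CPT_COLOR=always cpt-list | less -R   # keep colors even through a pipe
    CPT_COLOR=never  cpt-list             # plain output even on a terminal
    cpt-list                              # 'auto': colored only if stdout is a tty
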
+
+_dep_append() {
+ dep_graph=$(printf '%s\n%s %s\n' "$dep_graph" "$@" ;)
+}
+
+_tsort() {
+ # Return a linear reverse topological sort of the piped input, so we
+ # generate a proper build order. Returns 1 if a dependency cycle occurs.
+ #
+ # I was really excited when I saw POSIX specified a tsort(1) implementation,
+ # but the specification is quite vague: it doesn't specify cycles as a
+ # cause for error, and implementations differ on how it's handled. coreutils
+ # tsort(1) exits with an error, while openbsd tsort(1) doesn't. Both
+ # implementations are correct according to the specification.
+ #
+ # The script below was taken from <https://gist.github.com/apainintheneck/1803fb91dde3ba048ec51d44fa6065a4>
+ #
+ # The MIT License (MIT)
+ # Copyright (c) 2023 Kevin Robell
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a
+ # copy of this software and associated documentation files (the “Software”),
+ # to deal in the Software without restriction, including without limitation
+ # the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ # and/or sell copies of the Software, and to permit persons to whom the
+ # Software is furnished to do so, subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in
+ # all copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ # DEALINGS IN THE SOFTWARE.
+ awk '{
+ for (i = 1; i <= NF; ++i) {
+ # Store each node.
+ nodes[$i] = 1
+ if (is_child) {
+ child = $i
+ # Skip nodes that point to themselves.
+ # This traditionally means that the node
+ # is disconnected from the rest of the graph.
+ if (parent != child) {
+ # Store from parent to child.
+ idx = ++child_count[parent]
+ child_graph[parent, idx] = child
+ # Store count from child to parent.
+ ++parent_count[child]
+ }
+ } else {
+ parent = $i
+ }
+ # Flip switch
+ is_child = !is_child
+ }
+ }
+ END {
+ # Print errors to the stderr
+ stderr = "/dev/stderr"
+
+ # Sanity Check
+ if (is_child) {
+ print("Error: odd number of input values: expected pairs of values") > stderr
+ exit(1)
+ }
+
+ #####
+ # Topological Sort
+ #####
+
+ # Remove unconnected nodes first.
+ for (node in nodes) {
+ if (parent_count[node] == 0 && child_count[node] == 0) {
+ delete nodes[node]
+ print(node)
+ }
+ }
+
+ # Remove the rest of the nodes starting with those without parents.
+ while (length(nodes) > 0) {
+ removed_node = 0
+ for (node in nodes) {
+ # Delete and print nodes without any remaining parents.
+ if (parent_count[node] == 0) {
+ delete nodes[node]
+ removed_node = 1
+ # Decrease child_count for each parent node.
+ for (i = child_count[node]; i > 0; --i) {
+ child = child_graph[node, i]
+ --parent_count[child]
+ }
+ print(node)
+ }
+ }
+
+ # If we haven't removed any nodes, it means that there
+ # are no nodes without any remaining parents so we have
+ # a cycle.
+ if (!removed_node) {
+ print("Error: Cycle found") > stderr
+ exit(1)
+ }
+ }
+ }'
+}
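
For reference, a small sketch of the input format: each line is a
'dependency dependent' pair, and the output is a valid build order
(package names are hypothetical):

    printf '%s\n' 'musl gcc' 'gcc vim' | _tsort
    # musl
    # gcc
    # vim

    printf '%s\n' 'a b' 'b a' | _tsort   # returns 1, "Error: Cycle found" on stderr
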
+
trap_set() {
# Function to set the trap value.
case ${1:-cleanup} in
@@ -43,25 +192,30 @@ trap_set() {
trap pkg_clean EXIT
trap 'pkg_clean; exit 1' INT
;;
- block) trap '' INT ;;
- unset) trap - EXIT INT ;;
+ handle-int)
+ trap pkg_clean INT
+ ;;
+ block) trap '' INT ;;
+ unset) trap - EXIT INT ;;
esac
}
-sepchar() (
+sepchar() {
 # Separate every character of the given string without resorting to external
# processes.
[ "$1" ] || return 0; str=$1; set --
while [ "$str" ]; do
- str_tmp=$str
- for i in $(_seq $(( ${#str} - 1 ))); do
- str_tmp=${str_tmp%?}
- done
- set -- "$@" "$str_tmp"
- str=${str#$str_tmp}
+ set -- "$@" "${str%"${str#?}"}"
+ str=${str#?}
done
printf '%s\n' "$@"
-)
+}
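
The parameter expansion trick is worth a worked example: with str=abc,
"${str#?}" is 'bc', so "${str%"${str#?}"}" strips that suffix and leaves 'a',
the first character; "${str#?}" then advances the loop. Hence:

    sepchar abc
    # a
    # b
    # c
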
+
+_re() {
+ # Check that the string supplied in $2 conforms to the regular expression
+ # of $1.
+ printf %s "${2:?}" | grep -Eq "$1"
+}
_seq() (
# Pure shell counter meant to be used in 'for' loops.
@@ -72,13 +226,21 @@ _seq() (
printf '%s' "$buf"
)
+_multiply_char() (
+ buf=
+ for i in $(_seq "$2"); do
+ buf="$buf$1"
+ done
+ out "$buf"
+)
+
_stat() (
_user=; eval set -- "$(ls -ld "$1")"
id -u "${_user:=$3}" >/dev/null 2>&1 || _user=root
printf '%s' "$_user"
)
-_readlinkf() (
+_readlinkf() {
# Public domain POSIX sh readlink function by Koichi Nakashima
[ "${1:-}" ] || return 1
max_symlinks=40
@@ -112,13 +274,29 @@ _readlinkf() (
target=${link#*" $target -> "}
done
return 1
-)
+}
+
+_get_digest() {
+ # Get digest algorithm from the given file. It looks for a header on the
+ # file declaring the digest algorithm. Currently only BLAKE3 is supported.
+ # If the file does not include a header, the function will assume that it is
+ # using sha256 as a digest algorithm. If the given file doesn't exist it will
+ # return 1.
+ [ -r "$1" ] || return 1
+ read -r chk < "$1"
+ case $chk in
+ %BLAKE3) chk=b3sum ;;
+ %*) die "Unknown digest algorithm: '${chk#\%}'" ;;
+ *) chk=sh256
+ esac
+ out "$chk"
+}
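
As a sketch, a checksums file using the header looks like this (the digest
value here is hypothetical):

    %BLAKE3
    0f0be24f67d59eb7...  pkg-1.0.tar.gz

    _get_digest checksums   # prints 'b3sum'

A file without a '%' header yields 'sh256', the library's sha256 wrapper.
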
# This is the public domain getoptions shell library. It also forms a usage
# function.
# URL: https://github.com/ko1nksm/getoptions (v2.5.0)
# License: Creative Commons Zero v1.0 Universal
-# shellcheck disable=2016,2086
+# shellcheck disable=2016,2086,2317
getoptions() {
_error='' _on=1 _off='' _export='' _plus='' _mode='' _alt='' _rest=''
_flags='' _nflags='' _opts='' _help='' _abbr='' _cmds='' _init=@empty IFS=' '
@@ -313,6 +491,7 @@ getoptions() {
}
# URL: https://github.com/ko1nksm/getoptions (v2.5.0)
# License: Creative Commons Zero v1.0 Universal
+# shellcheck disable=2317
getoptions_help() {
_width='30,12' _plus='' _leading=' '
@@ -357,18 +536,34 @@ getoptions_help() {
echo "}"
}
+# 2086:
+# The lack of quotes is intentional. We do this so `getoptions()` does not try
+# to parse the empty string.
+# 2120:
+# The library does not call this function with any positional arguments, but
+# that does not mean that other programs will not do it, so this can also be
+# safely ignored.
+# shellcheck disable=2086,2120
global_options() {
- msg -- '' 'Global Options:'
- flag CPT_FORCE -f --force init:@export -- "Force operation"
- flag CPT_PROMPT -y --no-prompt on:0 off:0 init:@export -- "Do not prompt for confirmation"
- param CPT_ROOT --root init:@export -- "Use an alternate root directory"
- disp :usage -h --help -- "Show this help message"
- disp :version -v --version -- "Print version information"
-}
-
-warn() {
- # Print a warning message
- log "$1" "$2" "${3:-WARNING}"
+ # These are options that are supported by most utilities. If the optional
+ # argument 'silent' is given, the usage will not print these options, but
+ # the arguments will still be accepted. Alternatively, if the 'compact'
+ # argument is given, the function only prints the '--help' and '--version'
+ # flags. Sometimes it doesn't make sense to pollute the screen with options
+ # that will rarely ever be used.
+ _h=hidden:1
+ case $1 in
+ silent) _c=$_h ;;
+ compact) _c='' ;;
+ *) msg -- '' 'Global Options:'; _c='' _h=''
+ esac
+ flag CPT_FORCE -f --force $_h init:@export -- "Force operation"
+ flag CPT_PROMPT -y --no-prompt on:0 off:0 $_h init:@export -- "Do not prompt for confirmation"
+ param CPT_ROOT --root $_h init:@export -- "Use an alternate root directory"
+ param CPT_COLOR --color $_h init:@export -- "Colorize the output [default:auto]"
+ disp :usage -h --help $_c -- "Show this help message"
+ disp :version -v --version $_c -- "Print version information"
+ flag CPT_VERBOSE --verbose $_h init:@export -- "Be more verbose"
}
contains() {
@@ -383,6 +578,22 @@ regesc() {
sed 's|\\|\\\\|g;s|\[|\\[|g;s|\$|\\$|g;s|\.|\\.|g;s|\*|\\*|g;s|\^|\\^|g'
}
+pkg_download() {
+ # $1: URL
+ # $2: Output (Optional)
+ set -- "$1" "$(_readlinkf "${2:-${1##*/}}")"
+ case ${dl_prog##*/} in
+ axel) set -- -o "$2" "$1" ;;
+ aria2c) set -- -d "${2%/*}" -o "${2##*/}" "$1" ;;
+ curl) set -- -fLo "$2" "$1" ;;
+ wget|wget2) set -- -O "$2" "$1" ;;
+ esac
+
+ "$dl_prog" "$@" || {
+ rm -f "$2"
+ return 1
+ }
+}
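
A minimal usage sketch, assuming $dl_prog resolved to curl and a hypothetical
URL; the output path defaults to the basename of the URL, resolved with
_readlinkf():

    pkg_download https://example.org/pkg-1.0.tar.gz
    # runs: curl -fLo "$PWD/pkg-1.0.tar.gz" https://example.org/pkg-1.0.tar.gz
    # on failure the partial file is removed and 1 is returned
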
prompt() {
# If a CPT_NOPROMPT variable is set, continue.
@@ -407,6 +618,9 @@ as_root() {
# We are exporting package manager variables, so that we still have the
# same repository paths / access to the same cache directories etc.
+ #
+ # It doesn't matter whether CPT_HOOK is defined or not.
+ # shellcheck disable=2153
set -- HOME="$HOME" \
USER="$user" \
XDG_CACHE_HOME="$XDG_CACHE_HOME" \
@@ -421,8 +635,10 @@ as_root() {
CPT_PATH="$CPT_PATH" \
CPT_PID="$CPT_PID" \
CPT_PROMPT="$CPT_PROMPT" \
+ CPT_REPO_CACHE="$CPT_REPO_CACHE" \
CPT_ROOT="$CPT_ROOT" \
CPT_TMPDIR="$CPT_TMPDIR" \
+ CPT_VERBOSE="$CPT_VERBOSE" \
"$@"
case ${su##*/} in
@@ -446,29 +662,48 @@ pop() {
}
run_hook() {
- # Store the CPT_HOOK variable so that we can revert it if it is changed.
- oldCPT_HOOK=$CPT_HOOK
-
- # If a fourth parameter 'root' is specified, source the hook from a
- # predefined location to avoid privilige escalation through user scripts.
- [ "$4" ] && CPT_HOOK=$CPT_ROOT/etc/cpt-hook
-
- [ -f "$CPT_HOOK" ] || { CPT_HOOK=$oldCPT_HOOK; return 0 ;}
+ # Check that hooks exist before announcing that we are running a hook.
+ set +f
+ for hook in "$cpt_confdir/hooks/"* "$CPT_HOOK"; do
+ [ -f "$hook" ] && {
+ if [ "$2" ]; then
+ logv "$2" "Running $1 hook"
+ else
+ logv "Running $1 hook"
+ fi
+ break
+ }
+ done
- [ "$2" ] && log "$2" "Running $1 hook"
+ # Run all the hooks found in the configuration directory, and the
+ # user-defined hook.
+ for hook in "$cpt_confdir/hooks/"* "$CPT_HOOK"; do
+ set -f
+ [ -f "$hook" ] || continue
+ TYPE=${1:-null} PKG=${2:-null} DEST=${3:-null} . "$hook"
+ done
+}
- TYPE=${1:-null} PKG=${2:-null} DEST=${3:-null} . "$CPT_HOOK"
- CPT_HOOK=$oldCPT_HOOK
+# An optional argument could be provided to enforce a compression algorithm.
+# shellcheck disable=2120
+compress() {
+ case ${1:-$CPT_COMPRESS} in
+ bz2) bzip2 -z ;;
+ gz) gzip -6 ;;
+ xz) xz -zT 0 ;;
+ zst) zstd -3 ;;
+ lz) lzip -6 ;;
+ esac
}
decompress() {
case $1 in
- *.tar) cat ;;
- *.bz2) bzip2 -cd ;;
- *.lz) lzip -cd ;;
- *.xz|*.txz) xz -dcT 0 ;;
- *.tgz|*.gz) gzip -cd ;;
- *.zst) zstd -cd ;;
+ *.tar|*.cpio) cat ;;
+ *.bz2) bzip2 -cd ;;
+ *.lz) lzip -cd ;;
+ *.xz|*.txz) xz -dcT 0 ;;
+ *.tgz|*.gz) gzip -cd ;;
+ *.zst) zstd -cd ;;
esac < "$1"
}
@@ -486,69 +721,6 @@ sh256() {
while read -r hash _; do printf '%s %s\n' "$hash" "$1"; done
}
-tar_extract() {
- # Tarball extraction function that prefers pax(1) over tar(1). The reason we
- # are preferring pax is that we can strip components without relying on
- # ugly hacks such as the ones we are doing for 'tar'. Using 'tar' means that
- # we either have to sacrifice speed or portability, and we are choosing to
- # sacrifice speed. Fortunately, we don't have to make such a choice when
- # using pax.
- case "${extract##*/}" in
- pax) decompress "$1" | pax -r -s '/[^\/]*/./' ;;
- gtar|bsdtar) decompress "$1" | "$tar" xf - --strip-components 1 ;;
- tar) decompress "$1" > .ktar
-
- "$tar" xf .ktar || return
-
- # We now list the contents of the tarball so we can do our
- # version of 'strip-components'.
- "$tar" tf .ktar |
- while read -r file; do printf '%s\n' "${file%%/*}"; done |
-
- # Do not repeat files.
- uniq |
-
- # For every directory in the base we move each file
- # inside it to the upper directory.
- while read -r dir ; do
-
- # Skip if we are not dealing with a directory here.
- # This way we don't remove files on the upper directory
- # if a tar archive doesn't need directory stripping.
- [ -d "${dir#.}" ] || continue
-
- # Change into the directory in a subshell so we don't
- # need to cd back to the upper directory.
- (
- cd "$dir"
-
- # We use find because we want to move hidden files
- # as well.
- #
- # Skip the file if it has the same name as the directory.
- # We will deal with it later.
- #
- # Word splitting is intentional here.
- # shellcheck disable=2046
- find . \( ! -name . -prune \) ! -name "$dir" \
- -exec mv -f {} .. \;
-
- # If a file/directory with the same name as the directory
- # exists, append a '.cptbak' to it and move it to the
- # upper directory.
- ! [ -e "$dir" ] || mv "$dir" "../${dir}.cptbak"
- )
- rmdir "$dir"
-
- # If a backup file exists, move it into the original location.
- ! [ -e "${dir}.cptbak" ] || mv "${dir}.cptbak" "$dir"
- done
-
- # Clean up the temporary tarball.
- rm -f .ktar
- esac
-}
-
pkg_owner() {
set +f
@@ -562,6 +734,18 @@ pkg_owner() {
[ "$1" ] && printf '%s\n' "$1"
}
+pkg_owner_multi() {
+ set +f
+
+ [ "$3" ] || set -- "$1" "$2" "$sys_db"/*/manifest
+
+ grep "$@" | while read -r pkg_owner; do
+ pkg_owner=${pkg_owner%/*}
+ pkg_owner=${pkg_owner##*/}
+ printf '%s\n' "${pkg_owner##*/}"
+ done
+}
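
A sketch of the intended call pattern (package and file names hypothetical):
patterns are collected in a file and grep is invoked once over every installed
manifest, '-l' printing the matching manifest paths that the loop then reduces
to package names:

    printf '%s\n' '/usr/lib/libcurl.so.4$' > patterns
    pkg_owner_multi -lf patterns
    # curl
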
+
pkg_isbuilt() (
# Check if a package is built or not.
read -r ver rel < "$(pkg_find "$1")/version"
@@ -580,14 +764,20 @@ pkg_lint() {
repo_dir=$(pkg_find "$1")
cd "$repo_dir" || die "'$repo_dir' not accessible"
- [ -f sources ] || warn "$1" "Sources file not found"
+ [ -f sources ] || warnv "$1" "Sources file not found"
[ -x build ] || die "$1" "Build file not found or not executable"
[ -s version ] || die "$1" "Version file not found or empty"
read -r _ release 2>/dev/null < version || die "Version file not found"
[ "$release" ] || die "Release field not found in version file"
- [ "$2" ] || [ -f checksums ] || die "$pkg" "Checksums are missing"
+ # If we have a second argument, we are generating the checksums file,
+ # so we don't need to check whether there is one.
+ [ -z "$2" ] || return 0
+
+ # Check for a checksums file only if there is a sources file.
+ [ -f sources ] || return 0
+ [ -f checksums ] || die "$pkg" "Checksums are missing"
}
pkg_find() {
@@ -688,12 +878,8 @@ pkg_sources() {
repo_dir=$(pkg_find "$1")
while read -r src dest || [ "$src" ]; do
- # Remote git/hg repository or comment.
- if [ -z "${src##\#*}" ] ||
- [ -z "${src##git+*}" ] ||
- [ -z "${src##hg+*}" ]
-
- then :
+ # Remote repository or comment.
+ if _re "$re_vcs_or_com" "$src"; then :
# Remote source (cached).
elif [ -f "${src##*/}" ]; then
@@ -703,10 +889,15 @@ pkg_sources() {
elif [ -z "${src##*://*}" ]; then
log "$1" "Downloading $src"
- curl "$src" -fLo "${src##*/}" || {
- rm -f "${src##*/}"
- die "$1" "Failed to download $src"
- }
+ # We don't want our trap to exit immediately here if we receive an
+ # interrupt, we handle this ourselves.
+ trap_set handle-int
+
+ # Download the source
+ pkg_download "$src" || die "$1" "Failed to download $src"
+
+ # Restore original trap value.
+ trap_set cleanup
# Local source.
elif [ -f "$repo_dir/$src" ]; then
@@ -732,32 +923,17 @@ pkg_extract() {
mkdir -p "$mak_dir/$1/$dest" && cd "$mak_dir/$1/$dest"
case $src in
- # Git repository.
- git+*)
- # Split the source into URL + OBJECT (branch or commit).
- url=${src##git+} com=${url##*[@#]} com=${com#${url%[@#]*}}
-
- log "$1" "Cloning ${url%[@#]*}"; {
- git init
- git remote add origin "${url%[@#]*}"
- case "$url" in
- # Tags are specified via '@'
- *@*) git fetch -t --depth=1 origin "$com" || git fetch ;;
- *) git fetch --depth=1 origin "$com" || git fetch
- esac
- git checkout "${com:-FETCH_HEAD}"
- } || die "$1" "Failed to clone $src"
- ;;
- # Mercurial repository.
- hg+*)
- # Split the source into URL + OBJECT (branch or commit).
- url=${src##hg+} com=${url##*[@#]} com=${com#${url%[@#]*}}
+ # VCS Repository
+ git+*|hg+*|fossil+*)
+ backend=${src%%+*}
+ url=${src##"${backend}"+} com=${url##*[@#]} com=${com#"${url%[@#]*}"}
- # Unfortunately, there is no shallow cloning with Mercurial.
- log "$1" "Cloning ${url%[@#]*}"
- hg clone -u "${com:-tip}"
+ # Add back @ to com
+ case $url in *@*) com=@$com; esac
+ log "$1" "Cloning ${url%[#@]*}"
+ "pkg_vcs_clone_$backend" "${url%[#@]*}" "$com"
;;
# Comment or blank line.
@@ -766,11 +942,9 @@ pkg_extract() {
# Only 'tar', 'cpio', and 'zip' archives are currently supported for
# extraction. Other filetypes are simply copied to '$mak_dir'
# which allows for manual extraction.
- *://*.tar|*://*.tar.??|*://*.tar.???|*://*.tar.????|*://*.tgz|*://*.txz)
- tar_extract "$src_dir/$1/${src##*/}" ;;
-
- *://*.cpio|*://*.cpio.??|*://*.cpio.???|*://*.cpio.????)
- decompress "$src_dir/$1/${src##*/}" | pax -r ;;
+ *://*.tar|*://*.tar.??|*://*.tar.???|*://*.tar.????|*://*.tgz|\
+ *://*.txz|*://*.cpio|*://*.cpio.??|*://*.cpio.???|*://*.cpio.????)
+ decompress "$src_dir/$1/${src##*/}" | pax -rs '|[^/]*|.|' ;;
*://*.zip)
unzip "$src_dir/$1/${src##*/}" ||
@@ -799,12 +973,10 @@ pkg_depends() {
# Resolve all dependencies and generate an ordered list.
# This does a depth-first search. The deepest dependencies are
# listed first and then the parents in reverse order.
- contains "$deps" "$1" || {
- # Filter out non-explicit, aleady installed dependencies.
- # Only filter installed if called from 'pkg_build()'.
- [ "$pkg_build" ] && [ -z "$2" ] &&
- (pkg_list "$1" >/dev/null) && return
-
+ #
+ # shellcheck disable=2015
+ contains "$pkgs" "$1" && [ -z "$2" ] || {
+ [ "$2" = raw ] && _dep_append "$1" "$1"
while read -r dep type || [ "$dep" ]; do
# Skip comments and empty lines.
[ "${dep##\#*}" ] || continue
@@ -817,6 +989,16 @@ pkg_depends() {
make) [ "$2" = tree ] && [ -z "${3#first-nomake}" ] && continue
esac
+ # Filter out non-explicit, already installed dependencies if called
+ # from 'pkg_build()'.
+ [ "$pkg_build" ] && (pkg_list "$dep" >/dev/null) && continue
+
+ if [ "$2" = explicit ] || [ "$3" ]; then
+ _dep_append "$dep" "$dep"
+ else
+ _dep_append "$dep" "$1"
+ fi
+
# Recurse through the dependencies of the child packages. Forward
# the 'tree' operation.
if [ "$2" = tree ]; then
@@ -826,12 +1008,15 @@ pkg_depends() {
fi
done 2>/dev/null < "$(pkg_find "$1")/depends" ||:
- # After child dependencies are added to the list,
- # add the package which depends on them.
- [ "$2" = explicit ] || [ "$3" ] || deps="$deps $1 "
+ pkgs="$pkgs $1 "
}
}
+pkg_depends_commit() {
+ # Set deps, and cleanup dep_graph, pkgs
+ deps=$(printf '%s\n' "$dep_graph" | _tsort) dep_graph='' pkgs='' || warn "Dependency cycle detected"
+}
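
A condensed sketch of the pipeline (package names hypothetical):
pkg_depends() accumulates 'dependency dependent' pairs via _dep_append(), and
pkg_depends_commit() flattens them into a build order:

    _dep_append openssl curl   # curl depends on openssl
    _dep_append curl cpt       # cpt depends on curl
    pkg_depends_commit
    printf '%s\n' "$deps"
    # openssl
    # curl
    # cpt
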
+
pkg_order() {
# Order a list of packages based on dependence and
# take into account pre-built tarballs if this is
@@ -839,9 +1024,10 @@ pkg_order() {
order=; redro=; deps=
for pkg do case $pkg in
- *.tar.*) deps="$deps $pkg " ;;
+ *.tar.*) _dep_append "$pkg" "$pkg" ;;
*) pkg_depends "$pkg" raw
esac done
+ pkg_depends_commit
# Filter the list, only keeping explicit packages.
# The purpose of these two loops is to order the
@@ -859,7 +1045,7 @@ pkg_strip() {
# system as well as on the tarballs we ship for installation.
# Package has stripping disabled, stop here.
- [ -f "$mak_dir/$pkg/nostrip" ] && return
+ [ "$CPT_NOSTRIP" ] || [ -f "$mak_dir/$pkg/nostrip" ] && return
log "$1" "Stripping binaries and libraries"
@@ -882,21 +1068,31 @@ pkg_strip() {
done 2>/dev/null ||:
}
+pkg_fix_deps_fullpath() {
+ # Return the canonical path of libraries extracted by readelf.
+ while read -r line _ rslv _; do
+ [ "$line" = "$1" ] || continue
+ case $rslv in
+ ldd) out "$line" ;;
+ *) out "$rslv" ;;
+ esac
+ done
+}
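
A sketch with a hypothetical library: the ldd buffer (address column already
stripped by the sed call below) contains lines such as
'libcurl.so.4 => /usr/lib/libcurl.so.4', and the function maps the soname
reported by readelf back to its full path:

    pkg_fix_deps_fullpath libcurl.so.4 < "$lddfile"
    # /usr/lib/libcurl.so.4
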
+
pkg_fix_deps() {
# Dynamically look for missing runtime dependencies by checking each binary
# and library with either 'ldd' or 'readelf'. This catches any extra
# libraries and or dependencies pulled in by the package's build suite.
- log "$1" "Checking for missing dependencies"
+ log "$1" "Checking for missing dependencies (using ${elf_prog##*/})"
# Go to the directory containing the built package to
# simplify path building.
cd "$pkg_dir/$1/$pkg_db/$1"
- # Make a copy of the depends file if it exists to have a
- # reference to 'diff' against.
+ # Make a copy of the depends file if it exists to have a reference to 'diff'
+ # against.
if [ -f depends ]; then
- cp -f depends "$mak_dir/d"
- dep_file=$mak_dir/d
+ dep_file=$(_tmp_cp depends)
else
dep_file=/dev/null
fi
@@ -905,40 +1101,72 @@ pkg_fix_deps() {
pkg_name=$1
set +f; set -f -- "$sys_db/"*/manifest
- # Get a list of binaries and libraries, false files
- # will be found, however it's faster to get 'ldd' to check
- # them anyway than to filter them out.
- find "$pkg_dir/$pkg_name/" -type f 2>/dev/null |
+ # We create two separate files for storing dependency information.
+ #
+ # 'lddfile' is where we will be storing the output of ldd, so that we can
+ # reference it later.
+ #
+ # 'dep_file_list' is where we will be listing the needed files which we will
+ # be passing to grep.
+ #
+ lddfile=$(_tmp_create lddfile) dep_file_list=$(_tmp_create dfl)
+
+ pkg_fix_deps_find() {
+ # We run a similar command twice, so it might as well be a function.
+ # Basically runs find on the package directory and executes the
+ # given command.
+ end=+; [ "$1" = ldd ] && end=';'
- while read -r file; do
- case ${elf_prog:-ldd} in
- *readelf) "$elf_prog" -d "$file" 2>/dev/null ;;
- *) ldd "$file" 2>/dev/null ;;
- esac |
- while read -r dep; do
- # Skip lines containing 'ldd'.
- [ "${dep##*ldd*}" ] || continue
- case $dep in *NEEDED*\[*\] | *'=>'*) ;; *) continue; esac
-
- # readelf output:
- # 0x0000 (NEEDED) Shared library: [libc.so]
- dep=${dep##*\[}
- dep=${dep%%\]*}
-
- # ldd output:
- # libc.so => /lib/ld-musl-x86_64.so.1
- dep=${dep#* => }
- dep=${dep% *}
-
- # Figure out which package owns the file. Skip file if it is owned
- # by the current package. This also handles cases where a '*-bin'
- # package exists on the system, so the package manager doesn't think
- # that the package we are building depends on the *-bin version of
- # itself, or any other renamed versions of the same software.
- pkg_owner -l "/${dep#/}\$" "$PWD/manifest" >/dev/null && continue
- pkg_owner -l "/${dep#/}\$" "$@" ||:
- done ||:
- done >> depends
+ # Get a list of binaries and libraries. False files will be found;
+ # however, it's faster to get 'ldd' to check them anyway than to filter
+ # them out.
+ #
+ # We are terminating exec, so no worries.
+ # shellcheck disable=2067
+ find "$pkg_dir/$pkg_name/" -type f -exec "$@" {} "$end" 2>/dev/null |
+ sed 's/([^)]*) *$//' | sort -u
+ }
+
+ # Record all the dependencies in the 'lddfile'. This will include all
+ # dependencies, including non-direct ones. Unless the user prefers ldd,
+ # readelf will be used to filter the non-direct dependencies out.
+ pkg_fix_deps_find ldd -- > "$lddfile"
+
+ case "$elf_prog" in
+ *readelf) pkg_fix_deps_find "$elf_prog" -d ;;
+ *) cat "$lddfile"
+ esac | while read -r dep; do
+ # Skip lines containing 'ldd'.
+ [ "${dep##*ldd*}" ] || continue
+ case $dep in *NEEDED*\[*\] | *'=>'*) ;; *) continue; esac
+
+ # readelf output:
+ # 0x0000 (NEEDED) Shared library: [libc.so]
+ dep=${dep##*\[}
+ dep=${dep%%\]*}
+
+ # Retrieve the fullpath of the library from our ldd buffer.
+ case $elf_prog in
+ *readelf) dep=$(pkg_fix_deps_fullpath "$dep" < "$lddfile")
+ esac
+
+ # ldd output:
+ # libc.so => /lib/ld-musl-x86_64.so.1
+ dep=${dep#* => }
+ dep=${dep% *}
+
+ # Figure out which package owns the file. Skip file if it is owned
+ # by the current package. This also handles cases where a '*-bin'
+ # package exists on the system, so the package manager doesn't think
+ # that the package we are building depends on the *-bin version of
+ # itself, or any other renamed versions of the same software.
+ pkg_owner -l "/${dep#/}\$" "$PWD/manifest" >/dev/null && continue
+ out "/${dep#/}\$"
+ done >> "$dep_file_list"
+
+ # We write all the files into 'dep_file_list' so that we don't need to call
+ # grep on our entire database manifest hundreds of times.
+ pkg_owner_multi -lf "$dep_file_list" "$@" >> depends
# Remove duplicate entries from the new depends file.
# This removes duplicate lines looking *only* at the
@@ -946,7 +1174,7 @@ pkg_fix_deps() {
sort -uk1,1 -o depends depends 2>/dev/null ||:
# Display a diff of the new dependencies against the old ones.
- diff -U 3 "$dep_file" depends 2>/dev/null ||:
+ execv diff -U 3 "$dep_file" depends 2>/dev/null ||:
# Remove the depends file if it is empty.
[ -s depends ] || rm -f depends
@@ -968,7 +1196,7 @@ pkg_manifest() (
# sed: Remove the first character in each line (./dir -> /dir) and
# remove all lines which only contain '.'.
find . -type d -exec printf '%s/\n' {} + -o -print |
- sort -r | sed '/^\.\/$/d;ss.ss' > "${2:-$pkg_dir}/$1/$pkg_db/$1/manifest"
+ sort -r | sed '/^\.\/*$/d;ss.ss' > "${2:-$pkg_dir}/$1/$pkg_db/$1/manifest"
)
pkg_etcsums() (
@@ -980,10 +1208,16 @@ pkg_etcsums() (
# /etc/ directory for use in "smart" handling of these files.
log "$1" "Generating etcsums"
+ # Try to get the digest algorithm from the installed etcsums file. This
+ # makes sure that old packages continue to have the same digest algorithm
+ # so that a bunch of '.new' files aren't installed. It's not foolproof at all,
+ # but at least it keeps the /etc directory as clean as possible.
+ digest=$(_get_digest "$sys_db/$1/etcsums") || digest=b3sum
+ case $digest in b3sum) out "%BLAKE3"; esac > "$pkg_dir/$1/$pkg_db/$1/etcsums"
find etc -type f | while read -r file; do
- sh256 "$file"
- done > "$pkg_dir/$1/$pkg_db/$1/etcsums"
+ "$digest" "$file"
+ done >> "$pkg_dir/$1/$pkg_db/$1/etcsums"
)
pkg_tar() {
@@ -992,22 +1226,12 @@ pkg_tar() {
log "$1" "Creating tarball"
# Read the version information to name the package.
- read -r version release < "$(pkg_find "$1")/version"
+ read -r version release < "$pkg_dir/$1/$pkg_db/$1/version"
# Create a tarball from the contents of the built package.
- "$tar" cf - -C "$pkg_dir/$1" . |
- case $CPT_COMPRESS in
- bz2) bzip2 -z ;;
- xz) xz -zT 0 ;;
- gz) gzip -6 ;;
- zst) zstd -3 ;;
- lz) lzip -6 ;;
- *) gzip -6 ;; # Fallback to gzip
- esac \
- > "$bin_dir/$1#$version-$release.tar.$CPT_COMPRESS"
-
+ cd "$pkg_dir/$1"
+ pax -w . | compress > "$bin_dir/$1#$version-$release.tar.$CPT_COMPRESS"
log "$1" "Successfully created tarball"
-
run_hook post-package "$1" "$bin_dir/$1#$version-$release.tar.$CPT_COMPRESS"
}
@@ -1026,6 +1250,7 @@ pkg_build() {
# separately from those detected as dependencies.
explicit="$explicit $pkg "
} done
+ pkg_depends_commit
[ "$pkg_update" ] || explicit_build=$explicit
@@ -1203,10 +1428,12 @@ pkg_checksums() {
[ -f "$repo_dir/sources" ] || return 0
+ case ${2:-b3sum} in b3sum) out "%BLAKE3"; esac
+
while read -r src _ || [ "$src" ]; do
- # Comment.
- if [ -z "${src##\#*}" ]; then
- continue
+
+ # Skip checksums if it's a comment, or a VCS repository.
+ if _re "$re_vcs_or_com" "$src"; then continue
# File is local to the package.
elif [ -f "$repo_dir/$src" ]; then
@@ -1216,17 +1443,14 @@ pkg_checksums() {
elif [ -f "$src_dir/$1/${src##*/}" ]; then
src_path=$src_dir/$1
- # File is a git repository.
- elif [ -z "${src##git+*}" ]; then continue
-
 # Die here if the source, for some reason, doesn't exist.
else
die "$1" "Couldn't find source '$src'"
fi
- # An easy way to get 'sha256sum' to print with the 'basename'
+ # An easy way to get 'b3sum' to print with the 'basename'
# of files is to 'cd' to the file's directory beforehand.
- (cd "$src_path" && sh256 "${src##*/}") ||
+ (cd "$src_path" && "${2:-b3sum}" "${src##*/}") ||
die "$1" "Failed to generate checksums"
done < "$repo_dir/sources"
}
@@ -1234,13 +1458,18 @@ pkg_checksums() {
pkg_verify() {
# Verify all package checksums. This is achieved by generating a new set of
# checksums and then comparing those with the old set.
- verify_cmd="NR==FNR{a[\$1];next}/^git .*/{next}!((\$1)in a){exit 1}"
+ vcmd="NR==FNR{a[\$1];next}/^git .*/{next}!((\$1)in a){exit 1}END{if(NR/2!=FNR)exit 1}"
for pkg; do
repo_dir=$(pkg_find "$pkg")
+
[ -f "$repo_dir/sources" ] || continue
- pkg_checksums "$pkg" | awk "$verify_cmd" - "$repo_dir/checksums" || {
+ # Determine the type of digest algorithm from the checksums file to do
+ # verification with.
+ digest="$(_get_digest "$repo_dir/checksums")"
+
+ pkg_checksums "$pkg" "$digest" | awk "$vcmd" - "$repo_dir/checksums" || {
log "$pkg" "Checksum mismatch"
# Instead of dying above, log it to the terminal. Also define a
@@ -1256,6 +1485,9 @@ pkg_conflicts() {
# Check to see if a package conflicts with another.
log "$1" "Checking for package conflicts"
+ c_manifest=$(_tmp_create conflict-manifest)
+ c_conflicts=$(_tmp_create conflicts)
+
# Filter the tarball's manifest and select only files
# and any files they resolve to on the filesystem
# (/bin/ls -> /usr/bin/ls).
@@ -1275,9 +1507,9 @@ pkg_conflicts() {
# Combine the dirname and file values, and print them into the
# temporary manifest to be parsed.
- printf '%s/%s\n' "${dirname#$CPT_ROOT}" "${file##*/}"
+ printf '%s/%s\n' "${dirname#"$CPT_ROOT"}" "${file##*/}"
- done < "$tar_dir/$1/$pkg_db/$1/manifest" > "$CPT_TMPDIR/$pid/manifest"
+ done < "$tar_dir/$1/$pkg_db/$1/manifest" > "$c_manifest"
p_name=$1
@@ -1286,7 +1518,7 @@ pkg_conflicts() {
# shellcheck disable=2046,2086
set -- $(set +f; pop "$sys_db/$p_name/manifest" from "$sys_db"/*/manifest)
- [ -s "$CPT_TMPDIR/$pid/manifest" ] || return 0
+ [ -s "$c_manifest" ] || return 0
# In rare cases where the system only has one package installed
# and you are reinstalling that package, grep will try to read from
@@ -1302,13 +1534,12 @@ pkg_conflicts() {
# Store the list of found conflicts in a file as we will be using the
# information multiple times. Storing it in the cache dir allows us
# to be lazy as they'll be automatically removed on script end.
- "$grep" -Fxf "$CPT_TMPDIR/$pid/manifest" -- "$@" > "$CPT_TMPDIR/$pid/conflict" ||:
-
+ sed '/\/$/d' "$@" | sort "$c_manifest" - | uniq -d > "$c_conflicts" ||:
# Enable alternatives automatically if it is safe to do so.
# This checks to see that the package that is about to be installed
# doesn't overwrite anything it shouldn't in '/var/db/cpt/installed'.
- "$grep" -q ":/var/db/cpt/installed/" "$CPT_TMPDIR/$pid/conflict" ||
+ "$grep" -q "/var/db/cpt/installed/" "$c_conflicts" ||
choice_auto=1
# Use 'grep' to list matching lines between the to
@@ -1337,7 +1568,7 @@ pkg_conflicts() {
# this work.
#
# Pretty nifty huh?
- while IFS=: read -r _ con; do
+ while read -r con; do
printf '%s\n' "Found conflict $con"
# Create the "choices" directory inside of the tarball.
@@ -1359,13 +1590,13 @@ pkg_conflicts() {
log "this must be fixed in $p_name. Contact the maintainer"
die "by checking 'git log' or by running 'cpt-maintainer'"
}
- done < "$CPT_TMPDIR/$pid/conflict"
+ done < "$c_conflicts"
# Rewrite the package's manifest to update its location
# to its new spot (and name) in the choices directory.
pkg_manifest "$p_name" "$tar_dir" 2>/dev/null
- elif [ -s "$CPT_TMPDIR/$pid/conflict" ]; then
+ elif [ -s "$c_conflicts" ]; then
log "Package '$p_name' conflicts with another package" "" "!>"
log "Run 'CPT_CHOICE=1 cpt i $p_name' to add conflicts" "" "!>"
die "as alternatives."
@@ -1398,15 +1629,22 @@ pkg_swap() {
# its manifest file to reflect this. We then resort this file
# so no issues arise when removing packages.
cp -Pf "$CPT_ROOT/$2" "$pkg_owns>${alt#*>}"
- sed "s#^$(regesc "$2")\$#${PWD#$CPT_ROOT}/$pkg_owns>${alt#*>}#" \
+ sed "s#^$(regesc "$2")\$#${PWD#"$CPT_ROOT"}/$pkg_owns>${alt#*>}#" \
"../installed/$pkg_owns/manifest" |
sort -r -o "../installed/$pkg_owns/manifest"
+ else
+ # If the file doesn't exist, we assume that there was a previous owner,
+ # but the package was then removed. We want the message to be short
+ # and clear. I thought of writing "Swapping [...] from 'null' to '$1'",
+ # but that would probably sound more like a package manager bug. Instead
+ # we are printing the message below which should be informative enough.
+ log "Installing '$2' from '$1'"
fi
# Convert the desired alternative to a real file and rewrite
# the manifest file to reflect this. The reverse of above.
mv -f "$alt" "$CPT_ROOT/$2"
- sed "s#^${PWD#$CPT_ROOT}/$(regesc "$alt")\$#$2#" "../installed/$1/manifest" |
+ sed "s#^${PWD#"$CPT_ROOT"}/$(regesc "$alt")\$#$2#" "../installed/$1/manifest" |
sort -r -o "../installed/$1/manifest"
}
@@ -1420,23 +1658,25 @@ pkg_etc() {
mkdir -p "$CPT_ROOT/$dir"
done
+ digest=$(_get_digest "$_etcsums") || digest=b3sum
+
# Handle files in /etc/ based on a 3-way checksum check.
find etc ! -type d | while read -r file; do
- { sum_new=$(sh256 "$file")
- sum_sys=$(cd "$CPT_ROOT/"; sh256 "$file")
- sum_old=$("$grep" "$file$" "$mak_dir/c"); } 2>/dev/null ||:
+ { sum_new=$("$digest" "$file")
+ sum_sys=$(cd "$CPT_ROOT/"; "$digest" "$file")
+ sum_old=$("$grep" "$file$" "$_etcsums"); } 2>/dev/null ||:
- log "$pkg_name" "Doing 3-way handshake for $file"
- printf '%s\n' "Previous: ${sum_old:-null}"
- printf '%s\n' "System: ${sum_sys:-null}"
- printf '%s\n' "New: ${sum_new:-null}"
+ logv "$pkg_name" "Doing 3-way handshake for $file"
+ outv "Previous: ${sum_old:-null}"
+ outv "System: ${sum_sys:-null}"
+ outv "New: ${sum_new:-null}"
# Use a case statement to easily compare three strings at
# the same time. Pretty nifty.
case ${sum_old:-null}${sum_sys:-null}${sum_new} in
# old = Y, sys = X, new = Y
"${sum_new}${sum_sys}${sum_old}")
- log "Skipping $file"
+ logv "Skipping $file"
continue
;;
@@ -1446,7 +1686,7 @@ pkg_etc() {
"${sum_old}${sum_old}${sum_old}"|\
"${sum_old:-null}${sum_sys}${sum_sys}"|\
"${sum_sys}${sum_old}"*)
- log "Installing $file"
+ logv "Installing $file"
new=
;;
@@ -1489,10 +1729,11 @@ pkg_remove() {
# remove anything from packages that create empty directories for a
# purpose (such as baselayout).
manifest_list="$(set +f; pop "$sys_db/$1/manifest" from "$sys_db/"*/manifest)"
+ dirs="$(_tmp_name "directories")"
# shellcheck disable=2086
- [ "$manifest_list" ] && grep -h '/$' $manifest_list | sort -ur > "$mak_dir/dirs"
+ [ "$manifest_list" ] && grep -h '/$' $manifest_list | sort -ur > "$dirs"
- run_hook pre-remove "$1" "$sys_db/$1" root
+ run_hook pre-remove "$1" "$sys_db/$1"
while read -r file; do
# The file is in '/etc' skip it. This prevents the package
@@ -1500,7 +1741,7 @@ pkg_remove() {
[ "${file##/etc/*}" ] || continue
if [ -d "$CPT_ROOT/$file" ]; then
- "$grep" -Fxq "$file" "$mak_dir/dirs" 2>/dev/null && continue
+ "$grep" -Fxq "$file" "$dirs" 2>/dev/null && continue
rmdir "$CPT_ROOT/$file" 2>/dev/null || continue
else
rm -f "$CPT_ROOT/$file"
@@ -1511,7 +1752,7 @@ pkg_remove() {
# we no longer need to block 'Ctrl+C'.
trap_set cleanup
- run_hook post-remove "$1" "$CPT_ROOT/" root
+ run_hook post-remove "$1" "$CPT_ROOT/"
log "$1" "Removed successfully"
}
@@ -1534,12 +1775,14 @@ pkg_install() {
fi
mkdir -p "$tar_dir/$pkg_name"
+ cd "$tar_dir/$pkg_name"
+
log "$pkg_name" "Extracting $tar_file"
# Extract the tarball to catch any errors before installation begins.
- decompress "$tar_file" | "$tar" xf - -C "$tar_dir/$pkg_name"
+ decompress "$tar_file" | pax -rpp
- [ -f "$tar_dir/$pkg_name/$pkg_db/$pkg_name/manifest" ] ||
+ [ -f "./$pkg_db/$pkg_name/manifest" ] ||
die "'${tar_file##*/}' is not a valid CPT package"
# Ensure that the tarball's manifest is correct by checking that
@@ -1547,13 +1790,13 @@ pkg_install() {
[ "$CPT_FORCE" != 1 ] && log "$pkg_name" "Checking package manifest" &&
while read -r line; do
# Skip symbolic links
- [ -h "$tar_dir/$pkg_name/$line" ] ||
- [ -e "$tar_dir/$pkg_name/$line" ] || {
- log "File $line missing from tarball but mentioned in manifest" "" "!>"
- TARBALL_FAIL=1
- }
- done < "$tar_dir/$pkg_name/$pkg_db/$pkg_name/manifest"
- [ "$TARBALL_FAIL" ] && {
+ [ -h "./$line" ] ||
+ [ -e "./$line" ] || {
+ log "File $line missing from tarball but mentioned in manifest" "" "!>"
+ tarball_fail=1
+ }
+ done < "$pkg_db/$pkg_name/manifest"
+ [ "$tarball_fail" ] && {
log "You can still install this package by setting CPT_FORCE variable"
die "$pkg_name" "Missing files in manifest"
}
@@ -1562,18 +1805,17 @@ pkg_install() {
# Make sure that all run-time dependencies are installed prior to
# installing the package.
- [ -f "$tar_dir/$pkg_name/$pkg_db/$pkg_name/depends" ] &&
+ [ -f "$pkg_db/$pkg_name/depends" ] &&
[ "$CPT_FORCE" != 1 ] &&
while read -r dep dep_type || [ "$dep" ]; do
[ "${dep##\#*}" ] || continue
[ "$dep_type" ] || pkg_list "$dep" >/dev/null ||
install_dep="$install_dep'$dep', "
- done < "$tar_dir/$pkg_name/$pkg_db/$pkg_name/depends"
+ done < "$pkg_db/$pkg_name/depends"
[ "$install_dep" ] && die "$1" "Package requires ${install_dep%, }"
- run_hook pre-install "$pkg_name" "$tar_dir/$pkg_name" root
-
+ run_hook pre-install "$pkg_name" "$tar_dir/$pkg_name"
pkg_conflicts "$pkg_name"
log "$pkg_name" "Installing package incrementally"
@@ -1585,8 +1827,8 @@ pkg_install() {
# If the package is already installed (and this is an upgrade) make a
# backup of the manifest and etcsums files.
- cp -f "$sys_db/$pkg_name/manifest" "$mak_dir/m" 2>/dev/null ||:
- cp -f "$sys_db/$pkg_name/etcsums" "$mak_dir/c" 2>/dev/null ||:
+ _manifest=$(_tmp_cp "$sys_db/$pkg_name/manifest" 2>/dev/null) ||:
+ _etcsums=$(_tmp_cp "$sys_db/$pkg_name/etcsums" 2>/dev/null) ||:
# This is repeated multiple times. Better to make it a function.
pkg_rsync() {
@@ -1601,7 +1843,7 @@ pkg_install() {
pkg_etc
# Remove any leftover files if this is an upgrade.
- "$grep" -vFxf "$sys_db/$pkg_name/manifest" "$mak_dir/m" 2>/dev/null |
+ "$grep" -vFxf "$sys_db/$pkg_name/manifest" "$_manifest" 2>/dev/null |
while read -r file; do
file=$CPT_ROOT/$file
@@ -1638,144 +1880,231 @@ pkg_install() {
"$sys_db/$pkg_name/post-install" ||:
fi
- run_hook post-install "$pkg_name" "$sys_db/$pkg_name" root
+ run_hook post-install "$pkg_name" "$sys_db/$pkg_name"
log "$pkg_name" "Installed successfully"
}
-pkg_fetch() {
- log "Updating repositories"
+pkg_repository_update() {
+ # Function to update the given package repository.
+ cd "$1"
+ repo_type=$(pkg_vcs_info)
+ repo_root=${repo_type#"$PWD":}
+ repo_type=${repo_type##*:} repo_root=${repo_root%:*}
+ contains "$repos" "$repo_root" || {
+ repos="$repos $repo_root "
+ cd "$repo_root"
+
+ "pkg_vcs_pull_$repo_type"
+
+ # Repositories can contain a "Message of the Day" file in order to
+ # relay important information to their users.
+ ! [ -r "$repo_root/MOTD" ] || {
+ printf '%s\n%s\n%s\n\n' \
+ "$(_multiply_char '=' 60)" \
+ "Message of the Day [$PWD]" \
+ "$(_multiply_char '=' 60)"
+ cat "$repo_root/MOTD"
+ printf '\n%s\n' "$(_multiply_char '=' 60)"
+ }
+ }
+}
- run_hook pre-fetch
+pkg_vcs_clone_git() {
+ # $1: Clone URL
+ # $2: Branch or Commit Object
+ git init
+ git remote add origin "${1%[#@]*}"
+ case $2 in
+ @*) git fetch -t --depth=1 origin "${2#@}" || git fetch; set -- "$1" "${2#@}" ;;
+ *) git fetch --depth=1 origin "$2" || git fetch
+ esac
+ git checkout "${2:-FETCH_HEAD}"
+}
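
To illustrate the source specifications these clone helpers serve (URLs
hypothetical): a '@' suffix selects a tag, a '#' suffix a commit, and no
suffix tracks the default head:

    git+https://example.org/proj@v1.0     # shallow fetch of tag v1.0
    git+https://example.org/proj#1a2b3c   # checkout of commit 1a2b3c
    hg+https://example.org/proj           # update to 'tip'
    fossil+https://example.org/proj       # open 'trunk'
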
- # Create a list of all repositories.
- # See [1] at top of script.
- # shellcheck disable=2046,2086
- { IFS=:; set -- $CPT_PATH; IFS=$old_ifs ;}
+pkg_vcs_clone_hg() {
+ # $1: Clone URL
+ # $2: Branch or Commit Object
+ hg clone -u "${2:-tip}" "${1%[#@]*}" .
+}
- # Update each repository in '$CPT_PATH'. It is assumed that
- # each repository is 'git' tracked.
- for repo; do
- # Go to the root of the repository (if it exists).
- cd "$repo"
- cd "$(git rev-parse --show-toplevel 2>/dev/null)" 2>/dev/null ||
- cd "$(hg root 2>/dev/null)" 2>/dev/null ||:
+pkg_vcs_clone_fossil() {
+ # $1: Clone URL
+ # $2: Branch or Commit Object
+ fossil open -f "${1%[#@]*}" "${2:-trunk}"
+}
- if [ -d .git ]; then
+pkg_vcs_pull_fossil() {
+ # Pull function for Fossil.
+ log "$PWD" " "
+ [ "$(fossil remote 2>/dev/null)" != off ] || {
+ out "No remote, skipping."
+ return 0
+ }
- [ "$(git remote 2>/dev/null)" ] || {
- log "$repo" " "
- printf '%s\n' "No remote, skipping."
- continue
- }
+ # Ensure we have proper permissions to do the pull operation.
+ if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
+ fossil pull
+ fossil update
+ else
+ pkg_vcs_as_root "fossil pull && fossil update"
+ fi
+}
- contains "$repos" "$PWD" || {
- repos="$repos $PWD "
+pkg_vcs_pull_git() {
+ # Pull function for Git.
+ if [ "$(git remote 2>/dev/null)" ]; then
+ # Display a message if signing is enabled for this repository.
+ case $(git config merge.verifySignatures) in
+ true) log "$PWD" "[signed] " ;;
+ *) log "$PWD" " " ;;
+ esac
- # Display a tick if signing is enabled for this
- # repository.
- case $(git config merge.verifySignatures) in
- true) log "$PWD" "[signed] " ;;
- *) log "$PWD" " " ;;
- esac
+ # Ensure we have proper permissions to do the pull operation.
+ if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
+ git fetch
+ git merge
+ git submodule update --remote --init -f
+ else
+ pkg_vcs_as_root \
+ "git fetch && git merge && git submodule update --remote --init -f"
+ fi
+ else
+ log "$PWD" " "
+ # Skip if there are no submodules
+ [ -f .gitmodules ] || {
+ out "No remote, skipping."
+ return 0
+ }
+ if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
+ git submodule update --remote --init -f
+ else
+ pkg_vcs_as_root "git submodule update --remote --init -f"
+ fi
+ fi
+}
- if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
- git fetch
- git merge
- git submodule update --remote --init -f
+pkg_vcs_pull_hg() {
+ # Pull function for Mercurial.
+ log "$PWD" " "
+ [ "$(hg showconfig paths 2>/dev/null)" ] || {
+ out "No remote, skipping."
+ return 0
+ }
- else
- [ "$uid" = 0 ] || log "$PWD" "Need root to update"
-
- # Find out the owner of the repository and spawn
- # git as this user below.
- #
- # This prevents 'git' from changing the original
- # ownership of files and directories in the rare
- # case that the repository is owned by a 3rd user.
- (
- user=$(_stat "$PWD")
-
- [ "$user" = root ] ||
- log "Dropping permissions to $user for pull"
-
- git_cmd="git fetch && git merge && git submodule update --remote --init -f"
- case $su in *su) git_cmd="'$git_cmd'"; esac
-
- # Spawn a subshell to run multiple commands as
- # root at once. This makes things easier on users
- # who aren't using persist/timestamps for auth
- # caching.
- user=$user as_root sh -c "$git_cmd"
- )
- fi
- }
- elif [ -d .hg ]; then
+ if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
+ hg pull
+ hg update
+ else
+ pkg_vcs_as_root "hg pull && hg update"
+ fi
+}
- [ "$(hg showconfig paths 2>/dev/null)" ] || {
- log "$repo" " "
- printf '%s\n' "No remote, skipping."
- continue
- }
+pkg_vcs_pull_rsync() {
+ # Pull function for rsync repositories. The details of our rsync
+ # repositories are explained in the user manual.
+ log "$PWD" " "
- contains "$repos $PWD" || {
- repos="$repos $PWD"
+ # Read remote repository address from the '.rsync' file.
+ read -r remote < .rsync
+ if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
+ rsync -acvzzC --include=core --delete "$remote/" "$PWD"
+ else
+ pkg_vcs_as_root "rsync -acvzzC --include=core --delete \"$remote/\" \"$PWD\""
+ fi
+}
- if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
- hg pull
- hg update
- else
- [ "$uid" ] || log "$PWD" "Need root to update"
+pkg_vcs_pull_local() {
+ # Local repository. We don't do a "pull" here, we just notify the user that
+ # this is the case.
+ log "$PWD" " "
+ out "Not a remote repository, skipping."
+}
- # We are going to do the same operation as above, to
- # find the owner of the repository.
- (
- user=$(_stat "$PWD")
+pkg_vcs_as_root() (
+ # Helper function for pkg_vcs_pull* functions used for proper
+ # privilege escalation.
+ [ "$uid" = 0 ] || log "$PWD" "Need root to update"
- [ "$user" = root ] ||
- log "Dropping permissions to $user for pull"
+ # Find out the owner of the repository and spawn the operation as the user
+ # below.
+ #
+ # This prevents the VCS from changing the original ownership of files and
+ # directories in the rare case that the repository is owned by a third user.
+ user=$(_stat "$PWD")
- hg_cmd="hg pull && hg update"
+ [ "$user" = root ] || log "Dropping permissions to $user for pull"
+ case ${su##*/} in su) set -- "'$1'"; esac
- case $su in *su) hg_cmd="'$hg_cmd'"; esac
- user=$user as_root sh -c "$hg_cmd"
- )
- fi
- }
- elif [ -f .rsync ]; then
- # If an .rsync_root file exists, we check that the repository root
- # exists. If it does, we change to that directory to do the fetch.
- # This way, we allow for partial repositories while making sure that
- # we can fetch the repository in a single operation.
- [ -f .rsync_root ] && {
- read -r rsync_root < .rsync_root
- [ -f "$rsync_root/.rsync" ] && cd "$rsync_root"
- }
- contains "$repos" "$PWD" || {
- repos="$repos $PWD"
- read -r remote < .rsync
- if [ -w "$PWD" ] && [ "$uid" != 0 ]; then
- rsync -acvzzC --include=core --delete "$remote/" "$PWD"
- else
- [ "$uid" = 0 ] || log "$PWD" "Need root to update"
+ # Spawn a subshell to run multiple commands as root at once. This makes
+ # things easier on users who aren't using persist/timestamps for auth
+ # caching.
+ as_root sh -c "$@"
+)
- # Similar to the git update, we find the owner of
- # the repository and spawn rsync as that user.
- (
- user=$(_stat "$PWD")
+pkg_vcs_info() {
+ # Finds and returns repository information for the current directory. It
+ # will return current directory, repository root, and the type of repository
+ # in a colon separated format.
- [ "$user" = root ] ||
- log "Dropping permissions to $user for pull"
+ : "${repo_file:=$cac_dir/repository-cache}"
+ set --
- user=$user as_root rsync -acvzzC --include=core --delete "$remote/" "$PWD"
- )
- fi
- }
- else
- log "$repo" " "
- printf '%s\n' "Not a remote repository, skipping."
- fi
- done
+ if [ "$CPT_REPO_CACHE" != 0 ] && information=$(grep "^$PWD:" "$repo_file" 2>/dev/null); then
+ # Repository information is already cached.
+ printf '%s\n' "$information" | sed 1q
+ return
+ elif rootdir=$(git rev-parse --show-toplevel 2>/dev/null); then
+ # Git repository
+ backend=git
+ elif rootdir=$(hg root 2>/dev/null); then
+ # Mercurial repository
+ backend=hg
+ elif rootdir=$(fossil info 2>/dev/null | grep ^local-root:); then
+ # Fossil repository
+ backend=fossil
+
+ # We want to remove the initial spacing before the root directory, and
+ # the trailing slash on the root directory.
+ rootdir=${rootdir#local-root: *} rootdir=${rootdir%/}
+ elif [ -f .rsync ]; then
+ backend=rsync
+ rootdir=$PWD
+
+ # If an .rsync_root file exists, we check that the repository root
+ # exists. If it does, we change to that directory to do the fetch.
+ # This way, we allow for partial repositories while making sure that
+ # we can fetch the repository in a single operation.
+ [ -f .rsync_root ] && {
+ read -r rsync_root < .rsync_root
+ [ -f "$rsync_root/.rsync" ] && rootdir=$(_readlinkf "$rsync_root")
+ }
+ else
+ # Local repository
+ backend=local
+ rootdir=$PWD
+ fi
+
+ # We cache all of this information so that we don't have to spend much time
+ # looking it up the next time we need it. If CPT_REPO_CACHE is set
+ # to 0, we will not write this cache.
+ [ "$CPT_REPO_CACHE" = 0 ] || set -- "$repo_file"
+ printf '%s:%s:%s\n' "$PWD" "$rootdir" "$backend" | tee -a "$@"
+}
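
Each cache line in $repo_file is 'directory:root:backend', which
pkg_repository_update() above splits back apart; for a hypothetical partial
repository the record could look like:

    /home/user/repos/core/vim:/home/user/repos/core:git
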
+
+pkg_fetch() {
+ log "Updating repositories"
+
+ run_hook pre-fetch
+
+ # Create a list of all repositories.
+ # See [1] at top of script.
+ # shellcheck disable=2046,2086
+ { IFS=:; set -- $CPT_PATH; IFS=$old_ifs ;}
+
+ # Update each repository in '$CPT_PATH'. The repository type is
+ # detected automatically through 'pkg_vcs_info()'.
+ for repo; do pkg_repository_update "$repo"; done
run_hook post-fetch
}
@@ -1786,7 +2115,12 @@ pkg_updates(){
# an update.
[ "$CPT_FETCH" = 0 ] || pkg_fetch
- log "Checking for new package versions"
+ # Be quiet if we are doing a self-update; no need to print the same
+ # information twice. We add this basic function because we will be using it
+ # more than once.
+ _not_update () { [ "$cpt_self_update" ] || "$@" ;}
+
+ _not_update log "Checking for new package versions"
set +f
@@ -1800,7 +2134,7 @@ pkg_updates(){
# Compare installed packages to repository packages.
[ "$db_ver-$db_rel" != "$re_ver-$re_rel" ] && {
- printf '%s\n' "$pkg_name $db_ver-$db_rel ==> $re_ver-$re_rel"
+ _not_update printf '%s\n' "$pkg_name $db_ver-$db_rel ==> $re_ver-$re_rel"
outdated="$outdated$pkg_name "
}
done
@@ -1821,6 +2155,13 @@ pkg_updates(){
exit 0
}
+ [ "$outdated" ] || {
+ log "Everything is up to date"
+ return
+ }
+
+ _not_update log "Packages to update: ${outdated% }"
+
contains "$outdated" cpt && {
log "Detected package manager update"
log "The package manager will be updated first"
@@ -1831,18 +2172,17 @@ pkg_updates(){
cpt-install cpt
log "Updated the package manager"
- log "Re-run 'cpt update' to update your system"
-
- exit 0
- }
-
- [ "$outdated" ] || {
- log "Everything is up to date"
- return
+ log "Re-executing the package manager to continue the update"
+
+ # We export this variable so that cpt knows it's running for the second
+ # time. We make the new process promptless, and we avoid fetching
+ # repositories. We are assuming that the user was already prompted once,
+ # and that their repositories are up to date, or they have also passed
+ # the '-y' or '-n' flags themselves which leads to the same outcome.
+ export cpt_self_update=1
+ exec cpt-update -yn
}
- log "Packages to update: ${outdated% }"
-
# Tell 'pkg_build' to always prompt before build.
pkg_update=1
@@ -1858,12 +2198,12 @@ pkg_updates(){
}
pkg_get_base() (
- # Print the packages defined in the /etc/cpt-base file.
+ # Print the packages defined in the CPT base file.
 # If an argument is given, it prints a space separated list instead
 # of a list separated by newlines.
- # cpt-base is an optional file, return with success if it doesn't exist.
- [ -f "$CPT_ROOT/etc/cpt-base" ] || return 0
+ # CPT base is an optional file; return with success if it doesn't exist.
+ [ -f "$cpt_base" ] || return 0
# If there is an argument, change the format to use spaces instead of
# newlines.
@@ -1874,13 +2214,20 @@ pkg_get_base() (
# subshell. That is our purpose here, thank you very much.
# shellcheck disable=SC2030
while read -r pkgname _; do
+ # Ignore comments
[ "${pkgname##\#*}" ] || continue
+
+ # Store the package list in arguments
set -- "$@" "$pkgname"
+
+ # Retrieve the dependency tree of the package, so they are listed as
+ # base packages too. This ensures that no packages are broken in a
+ # "base reset", and the user has a working base.
deps=$(pkg_gentree "$pkgname" xn)
for dep in $deps; do
contains "$*" "$dep" || set -- "$@" "$dep"
done
- done < "$CPT_ROOT/etc/cpt-base"
+ done < "$cpt_base"
# Format variable is intentional.
# shellcheck disable=2059
@@ -1905,6 +2252,7 @@ pkg_gentree() (
esac
done
pkg_depends "$1" tree "$make_deps"
+ pkg_depends_commit
# Unless 'f' is given, pop the package from the list so that we don't list
# the package (for example if it's part of the base package list). Normally
@@ -1916,7 +2264,9 @@ pkg_gentree() (
# shellcheck disable=2086
[ -z "${2##*f*}" ] || deps=$(pop "$1" from $deps)
- eval set -- "$deps"
+ # Word splitting is intentional.
+ # shellcheck disable=2086
+ set -- $deps
pkg_order "$@"
if [ "$reverse" ]; then eval set -- "$redro"; else eval set -- "$order"; fi
[ "$1" ] || return 0
@@ -1929,8 +2279,11 @@ pkg_gentree() (
pkg_query_meta() {
# Query the 'meta' file of the given meta package. If there is no meta file,
# or the key being queried is unavailable, the function will return with
- # error.
- repo_dir=$(pkg_find "$1")
+ # error. A full path can be specified instead of a package name.
+ case $1 in
+ */*) repo_dir=$1 ;;
+ *) repo_dir=$(pkg_find "$1")
+ esac
[ -f "$repo_dir/meta" ] || return
while IFS=': ' read -r key val; do
case $key in
@@ -1952,29 +2305,51 @@ pkg_clean() {
rm -rf -- "${CPT_TMPDIR:=$cac_dir/proc}/$pid"
}
+_tmp_name() {
+ # Name a temporary file/directory
+ out "$tmp_dir/$1"
+}
+
+_tmp_cp() {
+ # Copy the given file to the temporary directory and return its name. If a
+ # second argument is not given, use the basename of the copied file.
+ _ret=${2:-${1##*/}}
+ _ret=$(_tmp_name "$_ret")
+ cp -p "$1" "$_ret"
+ out "$_ret"
+}
+
+_tmp_create() {
+ # Create the given file in the temporary directory and return its name.
+ create_tmp
+ _ret=$(_tmp_name "$1")
+ :> "$_ret" || return 1
+ out "$_ret"
+}
+
+create_tmp() {
+ # Create the required temporary directories and set the variables which
+ # point to them.
+ mak_dir=$tmp_dir/build
+ pkg_dir=$tmp_dir/pkg
+ tar_dir=$tmp_dir/export
+ mkdir -p "$mak_dir" "$pkg_dir" "$tar_dir"
+}
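
A short usage sketch of the helpers above (file names hypothetical); every
path lives under $tmp_dir and is removed by pkg_clean() on exit:

    create_tmp
    list=$(_tmp_create conflicts)            # empty file: $tmp_dir/conflicts
    copy=$(_tmp_cp "$sys_db/vim/manifest")   # copy:       $tmp_dir/manifest
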
+
create_cache() {
- # A temporary directory can be specified apart from the cache
- # directory in order to build in a user specified directory.
- # /tmp could be used in order to build on ram, useful on SSDs.
- # The user can specify CPT_TMPDIR for this.
+ # DEPRECATED, use create_tmp() instead.
#
- # Create the required temporary directories and set the variables
- # which point to them.
- mkdir -p "${tmp_dir:=${CPT_TMPDIR:=$cac_dir/proc}/$pid}"
-
# If an argument is given, skip the creation of other cache directories.
- # This here makes shellcheck extremely angry, so I am globally disabling
- # SC2119.
- [ "$1" ] || mkdir -p "${mak_dir:=$tmp_dir/build}" \
- "${pkg_dir:=$tmp_dir/pkg}" \
- "${tar_dir:=$tmp_dir/export}"
-
+ [ "$1" ] || create_tmp
}
# main()
{
set -ef
+ # Package manager version.
+ cpt_version=@VERSION@
+
# If a parser definition exists, let's run it ourselves. This makes sure we
# get the variables as soon as possible.
command -v parser_definition >/dev/null && {
@@ -1983,24 +2358,44 @@ create_cache() {
eval set -- "$REST"
}
- # Create the cache directories for CPT and set the variables which point
- # to them. This is seperate from temporary directories created in
- # create_cache(). That's because we need these variables set on most
- # occasions.
- mkdir -p "${cac_dir:=${CPT_CACHE:=${XDG_CACHE_HOME:-$HOME/.cache}/cpt}}" \
- "${src_dir:=$cac_dir/sources}" \
- "${log_dir:=$cac_dir/logs}" \
- "${bin_dir:=$cac_dir/bin}"
-
- # Set the location to the repository and package database.
- pkg_db=var/db/cpt/installed
-
# The PID of the current shell process is used to isolate directories
# to each specific CPT instance. This allows multiple package manager
# instances to be run at once. Store the value in another variable so
# that it doesn't change beneath us.
pid=${CPT_PID:-$$}
+ # A temporary directory can be specified apart from the cache directory in
+ # order to build in a user specified directory. /tmp could be used in order
+ # to build on ram, useful on SSDs. The user can specify $CPT_TMPDIR for
+ # this. We now also support the usage of $XDG_RUNTIME_DIR, so the directory
+ # naming can be confusing to some. Here are possible $tdir names (by order
+ # of preference):
+ #
+ # 1. $CPT_TMPDIR
+ # 2. $XDG_RUNTIME_DIR/cpt
+ # 3. $XDG_CACHE_DIR/cpt/proc
+ # 4. $HOME/.cache/cpt/proc
+ #
+ # We create the main temporary directory here to avoid permission issues
+ # that can arise from functions that call as_root(). However, the
+ # $pid directories are special for each process and aren't created unless
+ # `create_tmp()` is used.
+ #
+ # We used to assign and create the directories at the same time using a
+ # shell hack, but it made the variables editable outside of the package
+ # manager, which we don't actually want. Variables that are lower case
+ # aren't meant to be interacted with or set by the user.
+ cac_dir=${CPT_CACHE:=${XDG_CACHE_HOME:-${HOME:?}/.cache}}/cpt
+ src_dir=$cac_dir/sources
+ log_dir=$cac_dir/logs
+ bin_dir=$cac_dir/bin
+ tdir=${CPT_TMPDIR:=${XDG_RUNTIME_DIR:-$cac_dir/proc}${XDG_RUNTIME_DIR:+/cpt}}
+ tmp_dir=$tdir/$pid
+ mkdir -p "$cac_dir" "$src_dir" "$log_dir" "$bin_dir" "$tdir"
+
+ # Set the location to the repository and package database.
+ pkg_db=var/db/cpt/installed
+
# Force the C locale to speed up things like 'grep' which disable unicode
# etc when this is set. We don't need unicode and a speed up is always
# welcome.
@@ -2015,17 +2410,6 @@ create_cache() {
# POSIX correctness (grep quoted to avoid shellcheck false-positive).
grep=$(command -v ggrep) || grep='grep'
- # Prefer libarchive tar or GNU tar if installed as they are much
- # much faster than busybox's implementation. Very much worth it if
- # you value performance.
- tar=$(command -v bsdtar || command -v gtar) || tar=tar
-
- # Prefer libarchive tar, GNU tar, or the POSIX defined pax for tarball
- # extraction, as they can strip components, which is much much faster than
- # our portability function. Our first preference is pax, because it is
- # actually slightly faster than bsdtar and GNU tar.
- extract=$(command -v pax || command -v "$tar")
-
# Figure out which 'sudo' command to use based on the user's choice or
# what is available on the system.
su=${CPT_SU:-$(command -v ssu ||
@@ -2045,6 +2429,15 @@ create_cache() {
command -v llvm-readelf ||
command -v eu-readelf)"} || elf_prog=ldd
+ # Use one of the following programs to download package sources. Downloads
+ # are made using the `pkg_download()` function.
+ dl_prog=${CPT_DOWNLOADER:="$(
+ command -v curl ||
+ command -v wget ||
+ command -v wget2 ||
+ command -v axel ||
+ command -v aria2c)"} || dl_prog=curl
+
# Make note of the user's current ID to do root checks later on.
# This is used enough to warrant a place here.
uid=$(id -u)
@@ -2054,10 +2447,7 @@ create_cache() {
# Make sure that the CPT_ROOT doesn't end with a '/'. This might
# break some operations.
- [ -z "$CPT_ROOT" ] || [ "${CPT_ROOT##*/}" ] || {
- warn "" "Your CPT_ROOT variable shouldn't end with '/'"
- CPT_ROOT=${CPT_ROOT%/}
- }
+ CPT_ROOT=${CPT_ROOT%"${CPT_ROOT##*[!/]}"}
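
The nested expansion is worth unpacking: "${CPT_ROOT##*[!/]}" deletes the
longest prefix ending in a non-slash character, leaving only the trailing run
of slashes, which the outer '%' then removes. For example:

    CPT_ROOT=/mnt/cpt///   # inner expansion: '///' -> result: /mnt/cpt
    CPT_ROOT=/             # inner expansion: '/'   -> result: '' (plain root)
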
# Define an optional sys_arch variable in order to provide
# information to build files with architectural information.
@@ -2067,20 +2457,32 @@ create_cache() {
# the get go. It will be created as needed by package installation.
sys_db=$CPT_ROOT/$pkg_db
+ # CPT system configuration directory
+ cpt_confdir=$CPT_ROOT@SYSCONFDIR@/cpt
+
+ # Backwards compatibility for the old cpt-base location
+ cpt_base=$CPT_ROOT/etc/cpt-base
+ [ -f "$cpt_confdir/base" ] && cpt_base=$cpt_confdir/base
+
+ # Regular expression used in pkg_checksums() and pkg_sources() in order to
+ # identify VCS and comments
+ re_vcs_or_com='^(#|(fossil|git|hg)\+)'
+
# This allows for automatic setup of a CPT chroot and will
# do nothing on a normal system.
mkdir -p "$CPT_ROOT/" 2>/dev/null ||:
- # Set a value for CPT_COMPRESS if it isn't set.
- : "${CPT_COMPRESS:=gz}"
-
- # Unless being piped or the user specifically doesn't want colors, set
- # colors. This can of course be overriden if the user specifically want
- # colors during piping.
- if { [ "$CPT_COLOR" != 0 ] && [ -t 1 ] ;} || [ "$CPT_COLOR" = 1 ]; then
- colory="\033[1;33m" colorb="\033[1;36m" colre="\033[m"
- fi
+ # Set the default compression to gzip, and warn the user if the value is
+ # invalid.
+ case ${CPT_COMPRESS:=gz} in
+ bz2|gz|xz|zst|lz) ;;
+ *) warn "'$CPT_COMPRESS' is not a valid CPT_COMPRESS value, falling back to 'gz'"
+ CPT_COMPRESS=gz
+ esac
+ # Set colors if they are to be enabled.
+ # shellcheck disable=2034
+ colors_enabled && colory="\033[1;33m" colorb="\033[1;34m" colre="\033[m" colbold="\033[1m"
}
# If the library is being called with its own name, run arguments.