Commands:

    list        : lists available builds
    clone       : creates a new build by cloning a previous one
    portsupdate : updates a ports tree to the latest ZFS snapshot
    srcupdate   : updates a src tree to the latest ZFS snapshot
    cleanup     : cleans up or removes a build on the clients
    destroy     : removes a build on the server

There is some trickiness here in that various commands either expect to run as root, or expect to run as a ports-* user. For the latter case we can easily use su to proxy as the ports user when running as root; for the former we use the buildproxy to validate and re-execute the command as root.
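For example (illustrative invocations; the arch, branch, and build ID values shown here are hypothetical):

    build list amd64 8
    build clone amd64 8 latest
    build portsupdate amd64 8 latest
    build cleanup amd64 8 20090701120000 -full

Most commands take <arch> <branch> <buildid>, where <buildid> is either a timestamped build ID or one of the latest/previous symlinks.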
#!/bin/sh

# configurable variables
pb=/var/portbuild

# XXX unused
get_latest_snap() {
    snap=$1

    zfs list -rHt snapshot ${snap} | tail -1 | awk '{print $1}'
}

now() {
    date +%Y%m%d%H%M%S
}

do_list() {
    arch=$1
    branch=$2

    buildpar=/var/portbuild/${arch}/${branch}/builds

    if [ -d ${buildpar} ]; then
        snaps=$(cd ${buildpar}; ls -1d 2* 2> /dev/null)
        echo "The following builds are active:"
        echo ${snaps}

        if [ -L ${buildpar}/latest -a -d ${buildpar}/latest/ ]; then
            link=$(readlink ${buildpar}/latest)
            link=${link%/}
            link=${link##*/}

            echo "Latest build is: ${link}"
        fi
    else
        echo "No such build environment ${arch}/${branch}"
        exit 1
    fi
}

do_create() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4

    # Use a brace group rather than a subshell so that the exit
    # actually aborts the script when the zfs create fails.
    zfs create -o mountpoint=${builddir} a/portbuild/${arch}/${branch}/${buildid} \
        || { echo "Couldn't create build"; exit 1; }

    echo "New build ID is ${buildid}"
}

do_clone() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4

    if [ "$#" -gt 0 ]; then
        newid=$1
        shift
    else
        newid=$(now)
    fi

    tmp=$(realpath ${builddir})
    tmp=${tmp%/}
    newbuilddir="${tmp%/*}/${newid}"

    oldfs=a/portbuild/${arch}/${buildid}
    newfs=a/portbuild/${arch}/${newid}

    zfs snapshot ${oldfs}@${newid}
    zfs clone ${oldfs}@${newid} ${newfs}
    zfs set mountpoint=${newbuilddir} ${newfs}
    zfs promote ${newfs}

    if zfs list -H -t filesystem ${oldfs}/ports 2> /dev/null; then
        portsnap=${oldfs}/ports@${newid}
        zfs snapshot ${portsnap}
        zfs clone ${portsnap} ${newfs}/ports
        zfs promote ${newfs}/ports
    fi

    if zfs list -H -t filesystem ${oldfs}/src 2> /dev/null; then
        srcsnap=${oldfs}/src@${newid}
        zfs snapshot ${srcsnap}
        zfs clone ${srcsnap} ${newfs}/src
        zfs promote ${newfs}/src
    fi

    if [ -d ${newbuilddir} ]; then
        if [ ! -f ${pbab}/builds/previous/.keep ]; then
            build destroy ${arch} ${branch} previous
        fi
        rm -f ${pbab}/builds/previous
        mv ${pbab}/builds/latest ${pbab}/builds/previous

        ln -sf ${newbuilddir} ${pbab}/builds/latest
    fi

    echo "New build ID is ${newid}"
}

do_portsupdate() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4

    portsfs=a/portbuild/${arch}/${buildid}/ports

    echo "================================================"
    echo "Reimaging ZFS ports tree on ${builddir}/ports"
    echo "================================================"
    destroy_fs a/portbuild/${arch} ${buildid} /ports || exit 1

    now=$(now)
    zfs snapshot a/snap/ports@${now}
    zfs clone a/snap/ports@${now} ${portsfs}
    zfs set mountpoint=${builddir}/ports ${portsfs}
    cp ${builddir}/ports/cvsdone ${builddir}
}

do_srcupdate() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4

    srcfs=a/portbuild/${arch}/${buildid}/src

    echo "================================================"
    echo "Reimaging ZFS src tree on ${builddir}/src"
    echo "================================================"

    destroy_fs a/portbuild/${arch} ${buildid} /src || exit 1

    case ${branch} in
        8|8-exp)
            srcbranch=HEAD
            ;;
        *-exp)
            srcbranch=${branch%-exp}
            ;;
        *)
            srcbranch=${branch}
    esac
    now=$(now)

    zfs snapshot a/snap/src-${srcbranch}@${now}
    zfs clone a/snap/src-${srcbranch}@${now} ${srcfs}
    zfs set mountpoint=${builddir}/src ${srcfs}
}

cleanup_client() {
    arch=$1
    branch=$2
    buildid=$3
    mach=$4
    arg=$5

    # XXX use same exclusion protocol as claim-chroot

    echo "Started cleaning up ${arch}/${branch} build ID ${buildid} on ${mach}"

    test -f ${pb}/${arch}/portbuild.${mach} && . ${pb}/${arch}/portbuild.${mach}

    # Kill off builds and clean up chroot
    ${pb}/scripts/dosetupnode ${arch} ${branch} ${buildid} ${mach} -nocopy -queue

    if [ "${arg}" = "-full" ]; then
        ${ssh_cmd} ${client_user}@${mach} ${sudo_cmd} rm -rf ${pb}/${arch}/${branch}/builds/${buildid}/.ready ${pb}/${arch}/${branch}/builds/${buildid} /tmp/.setup-${buildid}
    fi
    echo "Finished cleaning up ${arch}/${branch} build ID ${buildid} on ${mach}"
}

do_cleanup() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    arg=$5
    shift 5

    for i in $(cat ${pb}/${arch}/mlist); do
        cleanup_client ${arch} ${branch} ${buildid} ${i} ${arg} &
    done
    wait
}

do_upload() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4

    echo "Not implemented yet"
    exit 1
}

test_fs() {
    local fs=$1

    zfs list -Ht filesystem | awk '{print $1}' | grep -q "$fs"
}

get_latest_child() {
    local fs=$1

    # Return the child of this filesystem with lexicographically
    # highest name
    #
    # XXX if a filesystem is cloned into a different prefix
    # (e.g. different arch) then we may not get the most recent one
    # but that should not happen.
    zfs get -H -o name,value origin | grep ${fs} | sort | \
        (while read zfs origin; do
            if [ "${origin%@*}" = "${fs}" ]; then
                child=${zfs}
            fi
        done; echo ${child})
}

get_parent() {
    local fs=$1

    # Check whether this filesystem has a parent
    zfs get -H -o value origin ${fs} | \
        (read snap;
        case "${snap}" in
            -|a/snap/*)
                ;;
            *)
                parent=${snap}
                ;;
        esac; echo ${parent})
}
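
# Illustrative example (hypothetical dataset names): if a/portbuild/i386/20090601
# was cloned from the snapshot a/portbuild/i386/20090501@20090601, then
# get_latest_child a/portbuild/i386/20090501 returns a/portbuild/i386/20090601,
# and get_parent a/portbuild/i386/20090601 returns
# a/portbuild/i386/20090501@20090601.  Origins under a/snap/* are ignored,
# since those are the master ports/src snapshots rather than clones of
# other builds.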

destroy_fs() {
    fs=$1
    buildid=$2
    subfs=$3

    fullfs=${fs}/${buildid}${subfs}
    if test_fs "${fullfs}"; then

        # We can destroy a leaf filesystem (having no dependent
        # clones) with no further effort.  However if we are
        # destroying the root of the clone tree then we have to
        # promote a child to be the new root.
        #
        # XXX In principle we might have to iterate until we end up as
        # a leaf but I don't know if this can happen.
        echo "Filesystem ${fullfs}"
        child=$(get_latest_child ${fullfs})
        parent=$(get_parent ${fullfs})
        echo "Filesystem has parent ${parent}"
        if [ -z "${child}" ]; then
            echo "Filesystem is a leaf"
        else
            echo "Filesystem has latest child ${child}"
            # Check whether filesystem is root
            if [ -z "${parent}" ]; then
                echo "Filesystem is root; promoting ${child}"
                zfs promote ${child}
                parent=$(get_parent ${fullfs})
                echo "New parent is ${parent}"
            else
                echo "Filesystem has parent ${parent} and cannot be destroyed"
                return 1
            fi
        fi

        # We might have snapshots on the target filesystem, e.g. if it
        # is both the head and tail of its clone tree.  They should be
        # unreferenced.
        (zfs list -H -o name | grep "^${fullfs}@" | xargs -n 1 zfs destroy) || return 1

        # The target filesystem should now be unreferenced
        zfs destroy -f "${fullfs}" || return 1

        # Clean up the initial snapshot(s) that were promoted onto a
        # cloned filesystem.  It could have been propagated several
        # times so we don't know where it ended up.  Therefore we
        # can't match for the ${buildid} part of ${fullfs}.
        #
        # XXX might be doing a substring match of subfs but we can't
        # prepend / because a null subfs will not match

        # Destroy the origin snapshot, which should be unreferenced
        if [ ! -z "${parent}" ]; then
            zfs destroy -f ${parent} || return 1
        fi
    fi
}

do_destroy() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4

    # Resolve into a separate variable so the error message can still
    # report the build ID that was requested.
    newbuildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
    if [ -z "${newbuildid}" ]; then
        echo "Invalid build ID ${buildid}"
        exit 1
    fi
    buildid=${newbuildid}

    latestid=$(resolve ${pb} ${arch} ${branch} latest)
    if [ "${buildid}" = "${latestid}" ]; then
        echo "Cannot destroy latest build"
        exit 1
    fi

    destroy_fs a/portbuild/${arch} ${buildid} /ports || exit 1
    destroy_fs a/portbuild/${arch} ${buildid} /src || exit 1
    destroy_fs a/portbuild/${arch} ${buildid} || exit 1

    rmdir ${builddir}
}

# Run a command as root if running as a user.
# Authentication and command validation are taken care of by buildproxy.
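# For example (hypothetical values), an unprivileged invocation of
#   build clone i386 7 20090501120000
# is forwarded to buildproxy-client as the command string
#   "build clone i386 7 20090501120000"
# and re-executed as root by the proxy; when already running as root the
# corresponding do_* function is called directly.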
proxy_root() {
    cmd=$1
    arch=$2
    branch=$3
    buildid=$4
    builddir=$5
    shift 5
    args=$@

    id=$(id -u)
    if [ "${id}" != "0" ]; then
        /var/portbuild/scripts/buildproxy-client "build ${cmd} ${arch} ${branch} ${buildid} ${args}"
        error=$?
        if [ ${error} -eq 254 ]; then
            echo "Proxy error"
        fi
    else
        eval "do_${cmd} ${arch} ${branch} ${buildid} ${builddir} ${args}"
        error=$?
    fi

    exit ${error}
}

# Run a command as the ports-${arch} user if root
proxy_user() {
    cmd=$1
    arch=$2
    branch=$3
    buildid=$4
    builddir=$5
    shift 5
    args=$@

    id=$(id -u)
    if [ "${id}" != "0" ]; then
        eval "do_${cmd} ${arch} ${branch} ${buildid} \"${builddir}\" ${args}"
        error=$?
    else
        su ports-${arch} -c "build ${cmd} ${arch} ${branch} ${buildid} \"${builddir}\" ${args}"
        error=$?
    fi

    exit ${error}
}

usage () {
    echo "usage: build <command> <arch> <branch> [<buildid>] [<options> ...]"
    exit 1
}

##################

if [ $# -lt 3 ]; then
    usage
fi

cmd=$1
arch=$2
branch=$3
shift 3

. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv

pbab=${pb}/${arch}/${branch}

validate_env ${arch} ${branch} || exit 1

# Not every command requires a buildid as arg
if [ $# -ge 1 ]; then
    buildid=$1
    shift 1

    # Most commands require a buildid that is valid on the server.  The
    # exception is "cleanup" which is cleaning up a client build that may
    # already be destroyed on the server.
    case "$cmd" in
        cleanup)
            # Resolve symlinks but don't bail if the build doesn't exist.
            newbuildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
            if [ ! -z "${newbuildid}" -a "${newbuildid}" != "${buildid}" ]; then
                echo "Resolved ${buildid} to ${newbuildid}"
                buildid=${newbuildid}

                builddir=$(realpath ${pbab}/builds/${buildid}/)
                # We can't rely on buildenv for this code path
            fi
            ;;
        *)
            newbuildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
            if [ -z "${newbuildid}" ]; then
                echo "Build ID ${buildid} does not exist"
                exit 1
            fi
            if [ ${newbuildid} != ${buildid} ]; then
                echo "Resolved ${buildid} to ${newbuildid}"
                buildid=${newbuildid}
            fi

            builddir=$(realpath ${pbab}/builds/${buildid}/)

            buildenv ${pb} ${arch} ${branch} ${builddir}
            ;;
    esac
fi

# Unprivileged commands
case "$cmd" in
    list)
        do_list ${arch} ${branch} $@
        ;;
    create)
        if [ -z "${buildid}" ]; then
            buildid=$(now)
            usage
        fi
        proxy_root create ${arch} ${branch} ${buildid} ${builddir} $@
        ;;
    clone)
        if [ -z "${buildid}" ]; then
            usage
        fi
        proxy_root clone ${arch} ${branch} ${buildid} ${builddir} $@
        ;;
    portsupdate)
        if [ -z "${buildid}" ]; then
            usage
        fi
        proxy_root portsupdate ${arch} ${branch} ${buildid} ${builddir} $@
        ;;
    srcupdate)
        if [ -z "${buildid}" ]; then
            usage
        fi
        proxy_root srcupdate ${arch} ${branch} ${buildid} ${builddir} $@
        ;;
    cleanup)
        if [ -z "${buildid}" ]; then
            usage
        fi
        # builddir may be null if cleaning up a destroyed build
        proxy_user cleanup ${arch} ${branch} ${buildid} "${builddir}" $@
        ;;
    upload)
        if [ -z "${buildid}" ]; then
            usage
        fi
        proxy_user upload ${arch} ${branch} ${buildid} ${builddir} $@
        ;;
    destroy)
        if [ -z "${buildid}" ]; then
            usage
        fi
        proxy_root destroy ${arch} ${branch} ${buildid} ${builddir} $@
        ;;
    *)
        echo "Invalid command: $cmd"
        exit 1
        ;;
esac