From 597398a9db674b36c1efb6f7dbc5a2b8e53ffd32 Mon Sep 17 00:00:00 2001
From: "Slawomir Wojciech Wojtczak (vermaden)"
Date: Fri, 27 Apr 2012 06:56:51 +0200
Subject: [PATCH] initial commit
---
README | 555 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
beadm | 215 ++++++++++++++++++++++
2 files changed, 770 insertions(+)
create mode 100644 README
create mode 100755 beadm
diff --git a/README b/README
new file mode 100644
index 0000000..ee63195
--- /dev/null
+++ b/README
@@ -0,0 +1,555 @@
+
+
+FreeBSD ZFS Madness
+Slawomir Wojtczak (vermaden)
+2012/04/27
+
+Some time ago I found a good, reliable way of using and installing FreeBSD
+and described it in my Modern FreeBSD Install [1] [2] HOWTO. Now, more
+than a year later I come back with my experiences about that setup and a
+proposal of newer and probably better way of doing it.
+
+
+
+1. Introduction
+
+Same as year ago, I assume that You would want to create fresh installation of
+FreeBSD using one or more hard disks, but also with (laptops) and without GELI
+based full disk encryption.
+
+This guide was written when FreeBSD 9.0 and 8.3 were available and definitely
+works for 9.0, but I did not try all this on the older 8.3, if You find some
+issues on 8.3, let me know I will try to address them in this guide.
+
+Earlier, I was not that confident about booting from the ZFS pool, but there
+is some very neat feature that made me think ZFS boot is now mandatory. If You
+just smiled, You know that I am thinking about Boot Environments feature
+from Illumos/Solaris systems.
+In case You are not familiar with the Boot
+Environments feature, check the Managing Boot Environments with Solaris
+11 Express PDF white paper [3]. Illumos/Solaris has the
+beadm(1M) [4] utility and while Philipp Wuensche wrote the
+manageBE script as replacement [5], it uses older style used
+at times when OpenSolaris (and SUN) were still having a great time.
+
+I spent last couple of days writing an up-to-date replacement for FreeBSD
+compatible beadm utility, and with some tweaks from today I just made
+it available at SourceForge [6] if You wish to test it. Currently it's
+about 200 lines long, so it should be pretty simple to take a look at it. I
+tried to make it as compatible as possible with the 'upstream' version, along
+with some small improvements, it currently supports basic functions like list,
+create, destroy and activate.
+
+# beadm
+usage:
+ beadm subcommand cmd_options
+
+ subcommands:
+
+ beadm activate beName
+ beadm create [-e nonActiveBe | beName@snapshot] beName
+ beadm create beName@snapshot
+ beadm destroy beName
+ beadm destroy beName@snapshot
+ beadm list
+
+There are several subtle differences between my implementation and
+Philipp's one, he defines and then relies upon ZFS property called
+freebsd:boot-environment=1 for each boot environment, I do not set
+any other additional ZFS properties. There is already org.freebsd:swap
+property used for SWAP on FreeBSD, so we may use org.freebsd:be in the
+future, but it is just a thought; right now it is not used. My version also supports
+activating boot environments received with zfs recv command from other
+systems (it just updates the appropriate /boot/zfs/zpool.cache file).
+My implementation is also style compatible with current Illumos/Solaris
+beadm(1M) which is like the example below.
+
+# beadm create -e default upgrade-test
+Created successfully
+
+# beadm list
+BE Active Mountpoint Space Policy Created
+default N / 1.06M static 2012-02-03 15:08
+upgrade-test R - 560M static 2012-04-24 22:22
+new - - 8K static 2012-04-24 23:40
+
+# zfs list -r sys/ROOT
+NAME USED AVAIL REFER MOUNTPOINT
+sys/ROOT 562M 8.15G 144K none
+sys/ROOT/default 1.48M 8.15G 558M legacy
+sys/ROOT/new 8K 8.15G 558M none
+sys/ROOT/upgrade-test 560M 8.15G 558M none
+
+# beadm activate default
+Activated successfully
+
+# beadm list
+BE Active Mountpoint Space Policy Created
+default NR / 1.06M static 2012-02-03 15:08
+upgrade-test - - 560M static 2012-04-24 22:22
+new - - 8K static 2012-04-24 23:40
+
+The boot environments are located in the same place as in Illumos/Solaris, at
+pool/ROOT/environment place.
+
+
+
+2. Now You're Thinking with Portals
+
+The main purpose of the Boot Environments concept is to make all
+risky tasks harmless, to provide an easy way back from possible troubles.
+Think about upgrading the system to newer version, an update of 30+ installed
+packages to latest versions, testing software or various solutions before
+taking the final decision, and much more. All these tasks are now harmless
+thanks to the Boot Environments, but this is just the tip of the
+iceberg.
+
+You can now move desired boot environment to other machine, physical or
+virtual and check how it will behave there, check hardware support on the other
+hardware for example or make a painless hardware upgrade. You may also clone
+Your desired boot environment and ... start it as a Jail for some more
+experiments or move Your old physical server install into FreeBSD Jail because
+it's not that heavily used anymore but it still has to be available.
+
+Other good example may be just created server on Your laptop inside
+VirtualBox virtual machine. After you finish the creation process and tests,
+You may move this boot environment to the real server and put it into
+production. Or even move it into VMware ESX/vSphere virtual machine and use
+it there.
+
+As You see the possibilities with Boot Environments are unlimited.
+
+
+
+
+3. The Install Process
+
+I created 3 possible schemas which should cover most demands, choose one
+and continue to the next step.
+
+3.1. Server with Two Disks
+
+I assume that this server has 2 disks and we will create ZFS mirror across
+them, so if any of them will be gone the system will still work as usual.
+I also assume that these disks are ada0 and ada1. If You
+have SCSI/SAS drives there, they may be named da0 and da1
+accordingly. The procedures below will wipe all data on these disks,
+You have been warned.
+
+
+- Boot from the FreeBSD USB/DVD.
+- Select the 'Live CD' option.
+- login: root
+- # sh
+- # DISKS="ada0 ada1"
+- # for I in ${DISKS}; do
+ > NUMBER=$( echo ${I} | tr -c -d '0-9' )
+ > gpart destroy -F ${I}
+ > gpart create -s GPT ${I}
+ > gpart add -t freebsd-boot -l bootcode${NUMBER} -s 128k ${I}
+ > gpart add -t freebsd-zfs -l sys${NUMBER} ${I}
+ > gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 ${I}
+ > done
+- # zpool create -f -o cachefile=/tmp/zpool.cache sys mirror /dev/gpt/sys*
+- # zfs set mountpoint=none sys
+- # zfs set checksum=fletcher4 sys
+- # zfs set atime=off sys
+- # zfs create sys/ROOT
+- # zfs create -o mountpoint=/mnt sys/ROOT/default
+- # zpool set bootfs=sys/ROOT/default sys
+- # cd /usr/freebsd-dist/
+- # for I in base.txz kernel.txz; do
+ > tar --unlink -xvpJf ${I} -C /mnt
+ > done
+- # cp /tmp/zpool.cache /mnt/boot/zfs/
+- # cat << EOF >> /mnt/boot/loader.conf
+ > zfs_load=YES
+ > vfs.root.mountfrom="zfs:sys/ROOT/default"
+ > EOF
+- # cat << EOF >> /mnt/etc/rc.conf
+ > zfs_enable=YES
+ > EOF
+- # :> /mnt/etc/fstab
+- # zfs umount -a
+- # zfs set mountpoint=legacy sys/ROOT/default
+- # reboot
+
+
+After these instructions and reboot we have these GPT partitions available,
+this example is on a 512MB disk.
+
+# gpart show
+=> 34 1048509 ada0 GPT (512M)
+ 34 256 1 freebsd-boot (128k)
+ 290 1048253 2 freebsd-zfs (511M)
+
+=> 34 1048509 ada1 GPT (512M)
+ 34 256 1 freebsd-boot (128k)
+ 290 1048253 2 freebsd-zfs (511M)
+
+# gpart list | grep label
+ label: bootcode0
+ label: sys0
+ label: bootcode1
+ label: sys1
+
+# zpool status
+ pool: sys
+ state: ONLINE
+ scan: none requested
+config:
+
+ NAME STATE READ WRITE CKSUM
+ sys ONLINE 0 0 0
+ mirror-0 ONLINE 0 0 0
+ gpt/sys0 ONLINE 0 0 0
+ gpt/sys1 ONLINE 0 0 0
+
+errors: No known data errors
+
+
+
+3.2. Server with One Disk
+
+If Your server configuration has only one disk, let's assume it's
+ada0, then You need different points 5. and 7. to make, use these
+instead of the ones above.
+
+
+- # DISKS="ada0"
+
+
+
+- # zpool create -f -o cachefile=/tmp/zpool.cache sys /dev/gpt/sys*
+
+
+All other steps are the same.
+
+
+
+3.3. Road Warrior Laptop
+
+The procedure is quite different for a Laptop because we will use the full disk
+encryption mechanism provided by GELI and then setup the ZFS pool. Its not
+currently possible to boot off from the ZFS pool on top of encrypted GELI
+provider, so we will use setup similar to the Server with ... one but
+with additional local pool for /home and /root
+partitions. It will be password based and You will be asked to type-in that
+password at every boot. The install process is generally the same with new
+instructions added for the GELI encrypted local pool, I put them with
+different color to make the difference more visible.
+
+
+- Boot from the FreeBSD USB/DVD.
+- Select the 'Live CD' option.
+- login: root
+- # sh
+- # DISKS="ada0"
+- # for I in ${DISKS}; do
+ > NUMBER=$( echo ${I} | tr -c -d '0-9' )
+ > gpart destroy -F ${I}
+ > gpart create -s GPT ${I}
+ > gpart add -t freebsd-boot -l bootcode${NUMBER} -s 128k ${I}
+ > gpart add -t freebsd-zfs -l sys${NUMBER} -s 10G ${I}
+ > gpart add -t freebsd-zfs -l local${NUMBER} ${I}
+ > gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 ${I}
+ > done
+- # zpool create -f -o cachefile=/tmp/zpool.cache sys /dev/gpt/sys0
+- # zfs set mountpoint=none sys
+- # zfs set checksum=fletcher4 sys
+- # zfs set atime=off sys
+- # zfs create sys/ROOT
+- # zfs create -o mountpoint=/mnt sys/ROOT/default
+- # zpool set bootfs=sys/ROOT/default sys
+- # geli init -b -s 4096 -e AES-CBC -l 128 /dev/gpt/local0
+- # geli attach /dev/gpt/local0
+- # zpool create -f -o cachefile=/tmp/zpool.cache local /dev/gpt/local0.eli
+- # zfs set mountpoint=none local
+- # zfs set checksum=fletcher4 local
+- # zfs set atime=off local
+- # zfs create local/home
+- # zfs create -o mountpoint=/mnt/root local/root
+- # cd /usr/freebsd-dist/
+- # for I in base.txz kernel.txz; do
+ > tar --unlink -xvpJf ${I} -C /mnt
+ > done
+- # cp /tmp/zpool.cache /mnt/boot/zfs/
+- # cat << EOF >> /mnt/boot/loader.conf
+ > zfs_load=YES
+ > geom_eli_load=YES
+ > vfs.root.mountfrom="zfs:sys/ROOT/default"
+ > EOF
+- # cat << EOF >> /mnt/etc/rc.conf
+ > zfs_enable=YES
+ > EOF
+- # :> /mnt/etc/fstab
+- # zfs umount -a
+- # zfs set mountpoint=legacy sys/ROOT/default
+- # zfs set mountpoint=/home local/home
+- # zfs set mountpoint=/root local/root
+- # reboot
+
+
+After these instructions and reboot we have these GPT partitions available,
+this example is on a 4GB disk.
+
+# gpart show
+=> 34 8388541 ada0 GPT (4.0G)
+ 34 256 1 freebsd-boot (128k)
+ 290 2097152 2 freebsd-zfs (1.0G)
+ 2097442 6291133 3 freebsd-zfs (3G)
+
+# gpart list | grep label
+ label: bootcode0
+ label: sys0
+ label: local0
+
+# zpool status
+ pool: local
+ state: ONLINE
+ scan: none requested
+config:
+
+ NAME STATE READ WRITE CKSUM
+	  local ONLINE 0 0 0
+ gpt/local0.eli ONLINE 0 0 0
+
+errors: No known data errors
+
+ pool: sys
+ state: ONLINE
+ scan: none requested
+config:
+
+ NAME STATE READ WRITE CKSUM
+ sys ONLINE 0 0 0
+ gpt/sys0 ONLINE 0 0 0
+
+errors: No known data errors
+
+
+
+4. Basic Setup after Install
+
+
+ - Login as root with empty password.
+ login: root
+password: [ENTER]
+
+ - Create initial snapshot after install.
+ # zfs snapshot -r sys/ROOT/default@install
+
+ - Set new root password.
+ # passwd
+
+ - Set machine's hostname.
+ # echo hostname=hostname.domain.com >> /etc/rc.conf
+
+ - Set proper timezone.
+ # tzsetup
+
+ - Add some swap space.
+ If You used the Server with ... type, then use this to add swap.
+ # zfs create -V 1G -o org.freebsd:swap=on \
+ -o checksum=off \
+ -o sync=disabled \
+ -o primarycache=none \
+ -o secondarycache=none sys/swap
+# swapon /dev/zvol/sys/swap
+ If You used the Road Warrior Laptop one, then use this one below,
+  this way the swap space will also be encrypted.
+ # zfs create -V 1G -o org.freebsd:swap=on \
+ -o checksum=off \
+ -o sync=disabled \
+ -o primarycache=none \
+ -o secondarycache=none local/swap
+# swapon /dev/zvol/local/swap
+
+ - Create snapshot called configured or production
+ After You configured Your fresh FreeBSD system, added needed packages
+ and services, create snapshot called configured or
+ production so if You mess something, You can always go back in time
+   to bring the working configuration back.
+ # zfs snapshot -r sys/ROOT/default@configured
+
+
+
+
+
+
+
+5. Enable Boot Environments
+
+Here are some simple instructions on how to download and enable the
+beadm command line utility for easy Boot Environments
+administration.
+
+# fetch https://downloads.sourceforge.net/project/beadm/beadm -o /usr/sbin/beadm
+# chmod +x /usr/sbin/beadm
+# rehash
+# beadm list
+BE Active Mountpoint Space Policy Created
+default NR / 592M static 2012-04-25 02:03
+
+
+
+6. WYSIWTF
+
+Now we have a working ZFS only FreeBSD system, I will put some example here
+about what You now can do with this type of installation and of course the
+Boot Environments feature.
+
+6.1. Create New Boot Environment Before Upgrade
+
+
+ - Create new environment from the current one.
+ # beadm create upgrade
+
+ - Activate it.
+ # beadm activate upgrade
+
+ - Reboot into it.
+ # shutdown -r now
+
+ - Mess with it.
+   You are now free to do anything You like for the upgrade process,
+   but even if You break everything, You still have a working default
+   environment.
+
+
+
+6.2. Perform Upgrade within a Jail
+
+This concept is about creating new boot environment from the
+desired one, lets call it jailed, then start that new environment
+inside a FreeBSD Jail and perform upgrade there. After You have finished all
+tasks related to this upgrade and You are satisfied with the achieved results,
+shutdown that Jail, set the boot environment into that just upgraded Jail
+called jailed and reboot into just upgraded system without any
+risks.
+
+
+ - Create new boot environment called jailed.
+ # beadm create -e default jailed
+Created successfully
+
+ - Create /usr/jails directory.
+ # mkdir /usr/jails
+
+ - Set mount point of new boot environment to /usr/jails/jailed dir.
+ # zfs set mountpoint=/usr/jails/jailed sys/ROOT/jailed
+
+ - Enable FreeBSD Jails mechanism and the jailed Jail in
+ /etc/rc.conf file.
+ # cat << EOF >> /etc/rc.conf
+> jail_enable=YES
+> jail_list="jailed"
+> jail_jailed_rootdir="/usr/jails/jailed"
+> jail_jailed_hostname="jailed"
+> jail_jailed_ip="10.20.30.40"
+> jail_jailed_devfs_enable="YES"
+> EOF
+
+ - Start the Jails mechanism.
+ # /etc/rc.d/jail start
+Configuring jails:.
+Starting jails: jailed.
+
+ - Check if the jailed Jail started.
+ # jls
+ JID IP Address Hostname Path
+ 1 10.20.30.40 jailed /usr/jails/jailed
+
+ - Login into the jailed Jail.
+ # jexec 1 tcsh
+
+ - PERFORM ACTUAL UPGRADE.
+
+ - Stop the jailed Jail.
+ # /etc/rc.d/jail stop
+Stopping jails: jailed.
+
+ - Disable Jails mechanism in /etc/rc.conf file.
+# sed -i '' -E s/"^jail_enable.*$"/"jail_enable=NO"/g /etc/rc.conf
+
+ - Activate just upgraded jailed boot environment.
+ # beadm activate jailed
+Activated successfully
+
+ - Restart the system into upgraded system.
+ # shutdown -r now
+
+
+
+6.3. Import Boot Environment from Other Machine
+
+Lets assume, that You need to upgrade or do some major modification to
+some of Your servers, You will then create new boot environment from the
+default one, move it to other 'free' machine, perform these tasks there
+and after everything is done, move the modified boot environment to the
+production without any risks. You may as well transport that environment
+into Your laptop/workstation and upgrade it in a Jail like in step 6.2 of
+this guide.
+
+
+ - Create new environment on the production server.
+ # beadm create upgrade
+Created successfully.
+
+ - Send the upgrade environment to test server.
+ # zfs send sys/ROOT/upgrade | ssh TEST zfs recv -u sys/ROOT/upgrade
+
+ - Activate the upgrade environment on the test server.
+ # beadm activate upgrade
+Activated successfully.
+
+ - Reboot into the upgrade environment on the test server.
+ # shutdown -r now
+
+ - PERFORM ACTUAL UPGRADE AFTER REBOOT.
+
+ - Send the upgraded upgrade environment onto the production server.
+ # zfs send sys/ROOT/upgrade | ssh PRODUCTION zfs recv -u sys/ROOT/upgrade
+
+ - Activate upgraded upgrade environment on the production server.
+ # beadm activate upgrade
+Activated successfully.
+
+ - Reboot into the upgrade environment on the production server.
+ # shutdown -r now
+
+
+7. References
+
+
+
+The last part of the HOWTO remains the same as Year ago ...
+
+You can now add your users, services and packages as usual on any FreeBSD system, have fun ;)
+
+
+
+
+
+
+
+
diff --git a/beadm b/beadm
new file mode 100755
index 0000000..278fabf
--- /dev/null
+++ b/beadm
@@ -0,0 +1,215 @@
+#!/bin/sh
+
+# Copyright (c) 2012 Slawomir Wojciech Wojtczak (vermaden)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that following conditions are met:
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+unset LC_ALL
+unset LANG
+PATH=${PATH}:/bin:/usr/bin:/sbin:/usr/sbin
+
+[ $( uname -r | cut -d '.' -f1 ) -lt 8 ] \
+  && { echo "ERROR: beadm only works on FreeBSD 8.0 or later."; exit 1; } # fix: must stop here, not merely warn and continue
+
+__usage() { # print the subcommand synopsis to stdout, then exit 1
+  NAME=${0##*/}
+  echo "usage:"
+  echo "  ${NAME} subcommand cmd_options"
+  echo
+  echo "  subcommands:"
+  echo
+  echo "  ${NAME} activate beName"
+  echo "  ${NAME} create [-e nonActiveBe | beName@snapshot] beName"
+  echo "  ${NAME} create beName@snapshot"
+  echo "  ${NAME} destroy beName"
+  echo "  ${NAME} destroy beName@snapshot"
+  echo "  ${NAME} list"
+  exit 1
+}
+
+__be_exist() { # 1=DATASET -- exit 1 with an error unless the ZFS dataset exists
+  zfs list -H -o name ${1} 1> /dev/null 2> /dev/null || {
+    echo "ERROR: Boot environment '${1##*/}' does not exist"
+    exit 1
+  }
+}
+
+__be_snapshot() { # 1=DATASET/SNAPSHOT -- true (0) when the name contains '@', i.e. names a snapshot
+  echo "${1}" | grep -q "@"
+}
+
+__be_new() { # 1=SOURCE 2=TARGET -- create boot environment TARGET from SOURCE (dataset or snapshot)
+  __be_snapshot ${1} && { # fix: original called __be_snapshot with no argument, so this branch never fired
+    zfs clone ${1} ${2}
+  } || {
+    zfs list -H -o name ${1}@${2##*/} 1> /dev/null 2> /dev/null && {
+      echo "ERROR: Snapshot '${1}@${2##*/}' exists"
+      exit 1
+    }
+    zfs snapshot -r ${1}@${2##*/} 1> /dev/null 2> /dev/null || {
+      echo "ERROR: Cannot create snapshot '${1}@${2##*/}'"
+      exit 1
+    }
+    zfs clone ${1}@${2##*/} ${2}
+  }
+  BASENAME=${1##*/}
+  zfs list -H -o name -t filesystem -r ${1} \
+    | grep -v -E "${1}$" \
+    | while read I
+      do
+        DATASET=$( echo ${I} | sed s/"${POOL}\/ROOT\/${BASENAME}\/"//g )
+        zfs clone ${I}@${2##*/} ${2}/${DATASET}
+      done
+  echo "Created successfully"
+}
+
+ROOTFS=$( mount | awk '/ \/ / {print $1}' ) # dataset (or device) currently mounted at /
+echo ${ROOTFS} | grep -q -E "^/dev/" && {
+  echo "ERROR: This system does not boot from ZFS pool"
+  exit 1
+}
+POOL=$( echo ${ROOTFS} | awk -F '/' '{print $1}' ) # pool name is the first path component
+BOOTFS=$( zpool list -H -o bootfs ${POOL} ) # dataset the pool will boot on next reboot
+
+case ${1} in
+  (list) # --------------------------------------------------------------------
+    POOL_PREFIX="${POOL}/ROOT"
+    LIST=$( zfs list -o name,used,mountpoint,creation -s creation -H -d 1 -r ${POOL}/ROOT | grep -E "^${POOL}/ROOT/" )
+    WIDTH_CREATION=$( echo "${LIST}" | awk '{print $5}' | wc -L ) # NOTE(review): unused; 'wc -L' is GNU-only -- confirm on FreeBSD
+    WIDTH_NAME=$( echo "${LIST}" | awk '{print $1}' | wc -L )
+    WIDTH_NAME=$(( ${WIDTH_NAME} - ${#POOL_PREFIX} - 1 )) # BE name width without the 'pool/ROOT/' prefix
+    printf "%-${WIDTH_NAME}s %-6s %-10s %5s %6s %s\n" \
+           BE Active Mountpoint Space Policy Created
+    echo "${LIST}" \
+      | while read NAME USED MOUNTPOINT C R E A T
+        do
+          NAME=${NAME##*/}
+          unset ACTIVE
+          [ "${POOL_PREFIX}/${NAME}" = "${ROOTFS}" ] && ACTIVE="${ACTIVE}N" # N = running now
+          [ "${POOL_PREFIX}/${NAME}" = "${BOOTFS}" ] && ACTIVE="${ACTIVE}R" # R = active on reboot
+          [ -z "${ACTIVE}" ] && ACTIVE="-"
+          printf "%-${WIDTH_NAME}s %-6s " ${NAME} ${ACTIVE}
+          case ${ACTIVE} in
+            (N|NR) MOUNT="/" ;;
+            (*)    MOUNT="-" ;;
+          esac
+          printf "%-10s %5s %-6s " ${MOUNT} ${USED} "static"
+          date -j -f "%a %b %d %H:%M %Y" "${C} ${R} ${E} ${A} ${T}" +"%Y-%m-%d %H:%M"
+        done
+    ;;
+  (create) # ------------------------------------------------------------------
+    case ${#} in
+      (4) # beadm create -e <source> <target>
+        [ ${2} = "-e" ] || __usage
+        __be_exist ${POOL}/ROOT/${3}
+        zfs list -H -o name ${POOL}/ROOT/${4} 1> /dev/null 2> /dev/null && { # fix: silence stdout like the other existence probes
+          echo "ERROR: Boot environment '${4}' already exists"
+          exit 1
+        }
+        __be_new ${POOL}/ROOT/${3} ${POOL}/ROOT/${4}
+        ;;
+      (2) # beadm create <beName> or <beName@snapshot>
+        __be_snapshot ${2} && {
+          zfs snapshot ${POOL}/ROOT/${2} 2> /dev/null || {
+            echo "ERROR: Cannot create '${2}' snapshot"
+            exit 1
+          }
+          echo "Created successfully"
+        } || {
+          __be_new ${ROOTFS} ${POOL}/ROOT/${2}
+        }
+        ;;
+      (*)
+        __usage
+        ;;
+    esac
+    ;;
+  (activate) # ----------------------------------------------------------------
+    [ "${BOOTFS}" = "${POOL}/ROOT/${2}" ] || {
+      [ "${ROOTFS}" = "${POOL}/ROOT/${2}" ] || {
+        MNT="/tmp/BE"
+        mkdir -p ${MNT} || {
+          echo "ERROR: Cannot create '${MNT}' directory"
+          exit 1
+        }
+        zfs set mountpoint=${MNT} ${POOL}/ROOT/${2}
+        cp /boot/zfs/zpool.cache ${MNT}/boot/zfs/zpool.cache
+        sed -i '' -E s/"^vfs.root.mountfrom=.*$"/"vfs.root.mountfrom=\"zfs:${POOL}\/ROOT\/${2##*/}\""/g ${MNT}/boot/loader.conf
+        zfs set mountpoint=legacy ${POOL}/ROOT/${2}
+      }
+      zpool set bootfs=${POOL}/ROOT/${2} ${POOL} \
+        || { # fix: original "cmd && { } || {" used an empty brace group, which is a shell syntax error
+        echo "ERROR: Failed to activate '${POOL}/ROOT/${2}'"
+        exit 1
+      }
+    }
+    zfs list -H -o name -t filesystem -r ${POOL}/ROOT/${2} \
+      | while read I
+        do
+          zfs promote ${I} 2> /dev/null # fix: promote each listed dataset; original ignored I and re-promoted the BE root
+        done
+    echo "Activated successfully"
+    ;;
+  (destroy) # ----------------------------------------------------------------
+    __be_exist ${POOL}/ROOT/${2}
+    [ "${BOOTFS}" = "${POOL}/ROOT/${2}" ] && {
+      echo "ERROR: '${POOL}/ROOT/${2}' is current active boot environment"
+      exit 1
+    }
+    echo "Are you sure you want to destroy '${2}'?"
+    echo -n "This action cannot be undone (y/[n]): "
+    read CHOICE
+    case ${CHOICE} in
+      (Y|y|[Yy][Ee][Ss])
+        __be_snapshot ${POOL}/ROOT/${2} && {
+          zfs destroy ${POOL}/ROOT/${2} 1> /dev/null 2> /dev/null || {
+            echo "ERROR: Snapshot '${2}' is origin for other boot environment(s)"
+            exit 1
+          }
+        } || {
+          ORIGINS=$( zfs list -r -H -o origin ${POOL}/ROOT/${2} )
+          zfs destroy ${POOL}/ROOT/${2} 1> /dev/null 2> /dev/null || {
+            zfs destroy -r ${POOL}/ROOT/${2} 2>&1 \
+              | grep "${POOL}/ROOT/" \
+              | grep -v "@" \
+              | while read I
+                do
+                  zfs promote ${I} 2> /dev/null # promote dependent clones that block destruction
+                done
+
+            echo "${ORIGINS}" \
+              | while read I
+                do
+                  zfs destroy -r ${I} 2> /dev/null # then drop the now-unused origin snapshots
+                done
+          }
+        }
+        echo "Destroyed successfully"
+        ;;
+      (*)
+        echo "'${2}' has not been destroyed"
+        ;;
+    esac
+    ;;
+  (*) # -----------------------------------------------------------------------
+    __usage
+    ;;
+esac