diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..2d7750b --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/iso +.*.swp diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..7e86a9b --- /dev/null +++ b/.travis.yml @@ -0,0 +1,2 @@ +language: bash + diff --git a/README.md b/README.md index 5b04430..05e1d48 100644 --- a/README.md +++ b/README.md @@ -1,89 +1,60 @@ -# Rpooler -## A ZFS rpool wrapper for the Ubuntu 18.04 Ubiquity Installer -This is a wrapper that automates the creation of a fully bootable zfs root pool with Ubuntu 18.04 installed. It was crafted off the step-by-step [HOWTO install Ubuntu 18.04 to a Whole Disk Native ZFS Root Filesystem using Ubiquity GUI installer](https://github.com/zfsonlinux/pkg-zfs/wiki/HOWTO-install-Ubuntu-18.04-to-a-Whole-Disk-Native-ZFS-Root-Filesystem-using-Ubiquity-GUI-installer). The goals are to further simplfy the installation process and encourage best practices through the guided process. - -Instructions: -1) Boot Ubuntu 18.04 Desktop Live CD -2) Select "Try Ubuntu" -3) Open terminal (Ctrl+Alt+t) -4) `wget https://raw.github.com/ghfields/rpooler/master/rpooler.sh` -5) `sudo bash rpooler.sh` - - -## What to expect when running script -``` -Installer script for ZFS whole disk installation using Ubuntu GUI (Ubiquity) ----------------------------------------------------------------------------- - What do you want to name your pool? -rpool - -These are the drives on your system: - /dev/disk/by-id/ata-VBOX_CD-ROM_VB2-01700376 - /dev/disk/by-id/ata-VBOX_HARDDISK_VB9c4c6292-31c83b83 - What vdev layout do you want to use? (hint: tab completion works): -/dev/disk/by-id/ata-VBOX_HARDDISK_VB9c4c6292-31c83b83 - - Which zpool & zfs options do you wish to set at creation? 
--o feature@multi_vdev_crash_dump=disabled -o feature@large_dnode=disabled -o feature@sha512=disabled -o feature@skein=disabled -o feature@edonr=disabled -o ashift=12 -O atime=off -O compression=lz4 -O normalization=formD -O recordsize=1M -O xattr=sa - -Zpool would create 'rpool' with the following layout: - - rpool - ata-VBOX_HARDDISK_VB9c4c6292-31c83b83 - - Does this look correct (y/n): -y - -The Ubiquity made swapfile will not function and will be removed. -Based on your system's 3.85 GB of RAM, Ubuntu suggests a swap of 2 GB. - What size, in GB, should the created swap zvol be? (0 for none): -2 -Zvol swap size: 2 GB - Is this correct (y/n): -y - -Configuring the Ubiquity Installer ----------------------------------- - 1) Choose any options you wish until you get to the 'Installation Type' screen. - 2) Select 'Erase disk and install Ubuntu' and click 'Continue'. - 3) Change the 'Select drive:' dropdown to '/dev/zd0 - 10.7 GB Unknown' and click 'Install Now'. - 4) A popup summarizes your choices and asks 'Write the changes to disks?'. Click 'Continue'. - 5) At this point continue through the installer normally. - 6) Finally, a message comes up 'Installation Complete'. Click the 'Continue Testing'. - This install script will continue. - -Press any key to launch Ubiquity. These instructions will remain visible in the terminal window. - - -====== -Ubiquity Launches -====== - -(Rsync output truncated) - -Setting up zfs-initramfs (0.7.5-1ubuntu16.2) ... -Processing triggers for libc-bin (2.27-3ubuntu1) ... -Processing triggers for initramfs-tools (0.130ubuntu3.1) ... -update-initramfs: Generating /boot/initrd.img-4.15.0-29-generic -cp: memory exhausted -Generating grub configuration file ... 
-Found linux image: /boot/vmlinuz-4.15.0-29-generic -Found initrd image: /boot/initrd.img-4.15.0-29-generic -Found memtest86+ image: /ROOT/ubuntu-1@/boot/memtest86+.elf -Found memtest86+ image: /ROOT/ubuntu-1@/boot/memtest86+.bin -done -Warning: The kernel is still using the old partition table. -The new table will be used at the next reboot or after you -run partprobe(8) or kpartx(8) -The operation has completed successfully. -Installing for i386-pc platform. -Installation finished. No error reported. - Would you like to create a snapshot before rebooting? : -y - -Script complete. Please reboot your computer to boot into your installation. -If first boot hangs, reset computer and try boot again. - - Do you want to restart now? -n -``` +# About + +zfs-install automates the creation of a fully bootable zfs root pool. + + +## Thanks + +Garrett Fields [original script](https://github.com/ghfields/rpooler) +Rui [vagrant apt cache](https://github.com/rgl/apt-cache-vagrant) + +## Roadmap +This project is in development and unstable, please use rpooler in the meantime. + +Please see the wiki for the general state of the project. + +## zfs documentation + +aaron-toponce [General zfs introduction and guidelines](https://pthree.org/2012/04/17/install-zfs-on-debian-gnulinux/) + +video: + +Linda Kately [Open-ZFS Bootcamp](https://www.youtube.com/watch?v=mLbtJQmfumI&feature=youtu.be) + +## zfsonlinux community documentation for specific distributions. + +ubuntu-ubiquity [HOWTO install Ubuntu 18.04 to a Whole Disk Native ZFS Root Filesystem using Ubiquity GUI installer](https://github.com/zfsonlinux/pkg-zfs/wiki/HOWTO-install-Ubuntu-18.04-to-a-Whole-Disk-Native-ZFS-Root-Filesystem-using-Ubiquity-GUI-installer). + +## Goals +- Simplify the installation process to the point of automation. +- Upstream and integrate as far as is "legally" possible. I wish this script wasn't required. +- Encourage best practices through the guided process. 
+ +## Supported distributions +Currently, ubuntu using the ubiquity installer is the only option. + +- ubuntu/bionic + +TODO +- arch +- gentoo +- debian +- rhel +- centos +- opensuse +- linuxfromscratch + +## Instructions +1) Boot a supported distribution +2) run zfs-install.sh +3) answer any required questions. +4) You're done. + +## Contributions +Any and all contributions in any form are encouraged and most welcome. + +There doesn't appear to be many people interested in this project, that's cool! +I would appreciate the time of any other zfs users. + +- What method do you use to "bootstrap" zfs for your use case ? +- I'm guessing most people are using packer to create custom iso's. Is this correct ? diff --git a/18.04 manual walkthrough.md b/doc/18.04 manual walkthrough.md similarity index 100% rename from 18.04 manual walkthrough.md rename to doc/18.04 manual walkthrough.md diff --git a/doc/fdisk_partition_types b/doc/fdisk_partition_types new file mode 100644 index 0000000..87563df --- /dev/null +++ b/doc/fdisk_partition_types @@ -0,0 +1,85 @@ +1 EFI System C12A7328-F81F-11D2-BA4B-00A0C93EC93B +2 MBR partition scheme 024DEE41-33E7-11D3-9D69-0008C781F39F +3 Intel Fast Flash D3BFE2DE-3DAF-11DF-BA40-E3A556D89593 +4 BIOS boot 21686148-6449-6E6F-744E-656564454649 +5 Sony boot partition F4019732-066E-4E12-8273-346C5641494F +6 Lenovo boot partition BFBFAFE7-A34F-448A-9A5B-6213EB736C22 +7 PowerPC PReP boot 9E1A2D38-C612-4316-AA26-8B49521E5A8B +8 ONIE boot 7412F7D5-A156-4B13-81DC-867174929325 +9 ONIE config D4E6E2CD-4469-46F3-B5CB-1BFF57AFC149 +10 Microsoft reserved E3C9E316-0B5C-4DB8-817D-F92DF00215AE +11 Microsoft basic data EBD0A0A2-B9E5-4433-87C0-68B6B72699C7 +12 Microsoft LDM metadata 5808C8AA-7E8F-42E0-85D2-E1E90434CFB3 +13 Microsoft LDM data AF9B60A0-1431-4F62-BC68-3311714A69AD +14 Windows recovery environment DE94BBA4-06D1-4D40-A16A-BFD50179D6AC +15 IBM General Parallel Fs 37AFFC90-EF7D-4E96-91C3-2D7AE055B174 +16 Microsoft Storage Spaces 
E75CAF8F-F680-4CEE-AFA3-B001E56EFC2D +17 HP-UX data 75894C1E-3AEB-11D3-B7C1-7B03A0000000 +18 HP-UX service E2A1E728-32E3-11D6-A682-7B03A0000000 +19 Linux swap 0657FD6D-A4AB-43C4-84E5-0933C84B4F4F +20 Linux filesystem 0FC63DAF-8483-4772-8E79-3D69D8477DE4 +21 Linux server data 3B8F8425-20E0-4F3B-907F-1A25A76F98E8 +22 Linux root (x86) 44479540-F297-41B2-9AF7-D131D5F0458A +23 Linux root (ARM) 69DAD710-2CE4-4E3C-B16C-21A1D49ABED3 +24 Linux root (x86-64) 4F68BCE3-E8CD-4DB1-96E7-FBCAF984B709 +25 Linux root (ARM-64) B921B045-1DF0-41C3-AF44-4C6F280D3FAE +26 Linux root (IA-64) 993D8D3D-F80E-4225-855A-9DAF8ED7EA97 +27 Linux reserved 8DA63339-0007-60C0-C436-083AC8230908 +28 Linux home 933AC7E1-2EB4-4F13-B844-0E14E2AEF915 +29 Linux RAID A19D880F-05FC-4D3B-A006-743F0F84911E +30 Linux extended boot BC13C2FF-59E6-4262-A352-B275FD6F7172 +31 Linux LVM E6D6D379-F507-44C2-A23C-238F2A3DF928 +32 FreeBSD data 516E7CB4-6ECF-11D6-8FF8-00022D09712B +33 FreeBSD boot 83BD6B9D-7F41-11DC-BE0B-001560B84F0F +34 FreeBSD swap 516E7CB5-6ECF-11D6-8FF8-00022D09712B +35 FreeBSD UFS 516E7CB6-6ECF-11D6-8FF8-00022D09712B +36 FreeBSD ZFS 516E7CBA-6ECF-11D6-8FF8-00022D09712B +37 FreeBSD Vinum 516E7CB8-6ECF-11D6-8FF8-00022D09712B +38 Apple HFS/HFS+ 48465300-0000-11AA-AA11-00306543ECAC +39 Apple UFS 55465300-0000-11AA-AA11-00306543ECAC +40 Apple RAID 52414944-0000-11AA-AA11-00306543ECAC +41 Apple RAID offline 52414944-5F4F-11AA-AA11-00306543ECAC +42 Apple boot 426F6F74-0000-11AA-AA11-00306543ECAC +43 Apple label 4C616265-6C00-11AA-AA11-00306543ECAC +44 Apple TV recovery 5265636F-7665-11AA-AA11-00306543ECAC +45 Apple Core storage 53746F72-6167-11AA-AA11-00306543ECAC +46 Solaris boot 6A82CB45-1DD2-11B2-99A6-080020736631 +47 Solaris root 6A85CF4D-1DD2-11B2-99A6-080020736631 +48 Solaris /usr & Apple ZFS 6A898CC3-1DD2-11B2-99A6-080020736631 +49 Solaris swap 6A87C46F-1DD2-11B2-99A6-080020736631 +50 Solaris backup 6A8B642B-1DD2-11B2-99A6-080020736631 +51 Solaris /var 6A8EF2E9-1DD2-11B2-99A6-080020736631 +52 Solaris 
/home 6A90BA39-1DD2-11B2-99A6-080020736631 +53 Solaris alternate sector 6A9283A5-1DD2-11B2-99A6-080020736631 +54 Solaris reserved 1 6A945A3B-1DD2-11B2-99A6-080020736631 +55 Solaris reserved 2 6A9630D1-1DD2-11B2-99A6-080020736631 +56 Solaris reserved 3 6A980767-1DD2-11B2-99A6-080020736631 +57 Solaris reserved 4 6A96237F-1DD2-11B2-99A6-080020736631 +58 Solaris reserved 5 6A8D2AC7-1DD2-11B2-99A6-080020736631 +59 NetBSD swap 49F48D32-B10E-11DC-B99B-0019D1879648 +60 NetBSD FFS 49F48D5A-B10E-11DC-B99B-0019D1879648 +61 NetBSD LFS 49F48D82-B10E-11DC-B99B-0019D1879648 +62 NetBSD concatenated 2DB519C4-B10E-11DC-B99B-0019D1879648 +63 NetBSD encrypted 2DB519EC-B10E-11DC-B99B-0019D1879648 +64 NetBSD RAID 49F48DAA-B10E-11DC-B99B-0019D1879648 +65 ChromeOS kernel FE3A2A5D-4F32-41A7-B725-ACCC3285A309 +66 ChromeOS root fs 3CB8E202-3B7E-47DD-8A3C-7FF2A13CFCEC +67 ChromeOS reserved 2E0A753D-9E48-43B0-8337-B15192CB1B5E +68 MidnightBSD data 85D5E45A-237C-11E1-B4B3-E89A8F7FC3A7 +69 MidnightBSD boot 85D5E45E-237C-11E1-B4B3-E89A8F7FC3A7 +70 MidnightBSD swap 85D5E45B-237C-11E1-B4B3-E89A8F7FC3A7 +71 MidnightBSD UFS 0394EF8B-237E-11E1-B4B3-E89A8F7FC3A7 +72 MidnightBSD ZFS 85D5E45D-237C-11E1-B4B3-E89A8F7FC3A7 +73 MidnightBSD Vinum 85D5E45C-237C-11E1-B4B3-E89A8F7FC3A7 +74 Ceph Journal 45B0969E-9B03-4F30-B4C6-B4B80CEFF106 +75 Ceph Encrypted Journal 45B0969E-9B03-4F30-B4C6-5EC00CEFF106 +76 Ceph OSD 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D +77 Ceph crypt OSD 4FBD7E29-9D25-41B8-AFD0-5EC00CEFF05D +78 Ceph disk in creation 89C57F98-2FE5-4DC0-89C1-F3AD0CEFF2BE +79 Ceph crypt disk in creation 89C57F98-2FE5-4DC0-89C1-5EC00CEFF2BE +80 OpenBSD data 824CC7A0-36A8-11E3-890A-952519AD3F61 +81 QNX6 file system CEF5A9AD-73BC-4601-89F3-CDEEEEE321A1 +82 Plan 9 partition C91818F9-8025-47AF-89D2-F030D7000C2C + + +Command (m for help): diff --git a/rpooler.sh b/rpooler.sh deleted file mode 100644 index b05b227..0000000 --- a/rpooler.sh +++ /dev/null @@ -1,186 +0,0 @@ -#!/bin/bash -green='\e[92m' -nocolor='\e[0m' -echo 
"" -echo "Installer script for ZFS whole disk installation using Ubuntu GUI (Ubiquity)" -echo "----------------------------------------------------------------------------" - -distver=$(lsb_release -cs) -if [ "$distver" != "bionic" ]; then - echo "This script requires Ubuntu 18.04 to run." - exit 1 -fi - -if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - exit 1 -fi - -if !(apt update &> /dev/null && apt install -y zfsutils &> /dev/null); then - echo "Error installing zfsutils from the internet. Please check your connection." - exit 1 -fi - -while [[ $exitpoolselect == "" ]]; do - echo -e $green "What do you want to name your pool? " $nocolor - read -i "rpool" -e pool - echo "" - echo "These are the drives on your system:" - for i in $(ls /dev/disk/by-id/ -a |grep -v part |awk '{if(NR>2)print}');do echo -e ' \t' "/dev/disk/by-id/"$i;done - echo -e $green "What vdev layout do you want to use? (hint: tab completion works): " $nocolor - read -e layout - echo "" - echo -e $green "Which zpool & zfs options do you wish to set at creation? " $nocolor - read -i "-o feature@multi_vdev_crash_dump=disabled -o feature@large_dnode=disabled -o feature@sha512=disabled -o feature@skein=disabled -o feature@edonr=disabled -o ashift=12 -O atime=off -O compression=lz4 -O normalization=formD -O recordsize=1M -O xattr=sa" -e options - echo "" - echo -n "Zpool " - if (zpool create -nf $options $pool $layout); then - echo "" - while true; do - echo -e $green "Does this look correct (y/n):" $nocolor - read -i "y" -e yn - case $yn in - [Yy]* ) exitpoolselect="1"; break;; - [Nn]* ) break;; - * ) echo "Please answer yes or no.";; - esac - done - else - echo "" - echo "Your selections formed an invalid "zpool create" commmand. Please try again." 
- fi -done - - -systemramk=$(free -m | awk '/^Mem:/{print $2}') -systemramg=$(echo "scale=2; $systemramk/1024" | bc) -suggestswap=$(printf %.$2f $(echo "scale=2; sqrt($systemramk/1024)" | bc)) - -while [[ $exitfilesystemselect == "" ]]; do - echo "" - echo "The Ubiquity made swapfile will not function and will be removed." - echo "Based on your system's $systemramg GB of RAM, Ubuntu suggests a swap of $suggestswap GB." - echo -e $green "What size, in GB, should the created swap zvol be? (0 for none): " $nocolor - read -e -i $suggestswap swapzvol - echo "Zvol swap size: $swapzvol GB" - while true; do - echo -e $green "Is this correct (y/n):" $nocolor - read -i "y" -e yn - case $yn in - [Yy]* ) exitfilesystemselect="1"; break;; - [Nn]* ) break;; - * ) echo "Please answer yes or no.";; - esac - done -done - -if !(zpool create -f $options $pool $layout); then - echo "Error creating zpool. Terminating Script." - exit 1 -fi - -if !(zfs create -V 10G $pool/ubuntu-temp); then - echo "Error creating ZVOL. Terminating Script." - exit 1 -fi - -echo "" -echo "Configuring the Ubiquity Installer" -echo "----------------------------------" -echo -e ' \t' "1) Choose any options you wish until you get to the 'Installation Type' screen." -echo -e ' \t' "2) Select 'Erase disk and install Ubuntu' and click 'Continue'." -echo -e ' \t' "3) Change the 'Select drive:' dropdown to '/dev/zd0 - 10.7 GB Unknown' and click 'Install Now'." -echo -e ' \t' "4) A popup summarizes your choices and asks 'Write the changes to disks?'. Click 'Continue'." -echo -e ' \t' "5) At this point continue through the installer normally." -echo -e ' \t' "6) Finally, a message comes up 'Installation Complete'. Click the 'Continue Testing'." -echo -e ' \t' "This install script will continue." -echo "" -read -p "Press any key to launch Ubiquity. These instructions will remain visible in the terminal window." - -if !(ubiquity --no-bootloader); then - echo "Ubiquity Installer failed to complete. Terminating Script." 
- exit 1 -fi - -while [[ $exitrootselect == "" ]]; do - echo -e $green "Where on your pool do you want your root dataset? " $nocolor - echo -e "$pool\c" - read -i "/ROOT/ubuntu-1" -e root - echo "" - while true; do - echo -e $green "Create root dataset at $pool$root." $nocolor - echo -e $green "Is this correct (y/n):" $nocolor - read -i "y" -e yn - case $yn in - [Yy]* ) exitrootselect="1"; break;; - [Nn]* ) break;; - * ) echo "Please answer yes or no.";; - esac - done -done - -zfs create -p $pool$root - -if !(rsync -avPX --exclude '/swapfile' /target/. /$pool$root/.); then - echo "Rsync failed to complete. Terminating Script." - exit 1 -fi - -for d in proc sys dev; do mount --bind /$d /$pool$root/$d; done - -cp /etc/resolv.conf /$pool$root/etc/resolv.conf -sed -e '/\s\/\s/ s/^#*/#/' -i /$pool$root/etc/fstab #My take at comment out / line. -sed -e '/\sswap\s/ s/^#*/#/' -i /$pool$root/etc/fstab #My take at comment out swap line. - -if [[ $swapzvol -ne 0 ]]; then - zfs create -V "$swapzvol"G -b $(getconf PAGESIZE) -o compression=zle \ - -o logbias=throughput -o sync=always \ - -o primarycache=metadata -o secondarycache=none \ - -o com.sun:auto-snapshot=false $pool/swap - mkswap -f /dev/zvol/$pool/swap - echo RESUME=none > /$pool$root/etc/initramfs-tools/conf.d/resume - echo /dev/zvol/$pool/swap none swap defaults 0 0 >> /$pool$root/etc/fstab -fi - -chroot /$pool$root apt update -chroot /$pool$root apt install -y zfs-initramfs -chroot /$pool$root update-grub -drives="$(echo $layout | sed 's/\S*\(mirror\|raidz\|log\|spare\|cache\)\S*//g')" -for i in $drives; do - chroot /$pool$root sgdisk -a1 -n2:512:2047 -t2:EF02 $i - chroot /$pool$root grub-install $i - done - -umount -R /$pool$root -zfs set mountpoint=/ $pool$root - -while true; do - echo -e $green 'Would you like to create a snapshot before rebooting? 
: ' $nocolor - read -i "y" -e yn - case $yn in - [Yy]* ) zfs snapshot $pool$root@install-pre-reboot; break;; - [Nn]* ) break;; - * ) echo "Please answer yes or no.";; - esac - -done -swapoff -a -umount /target -zfs destroy $pool/ubuntu-temp -zpool export $pool -echo "" -echo "Script complete. Please reboot your computer to boot into your installation." -echo "If first boot hangs, reset computer and try boot again." -echo "" - -while true; do - echo -e $green 'Do you want to restart now? ' $nocolor - read -i "y" -e yn - case $yn in - [Yy]* ) shutdown -r 0; break;; - [Nn]* ) break;; - * ) echo "Please answer yes or no.";; - esac - -done -exit 0 diff --git a/src/ubuntu-debootstrap/build-livecd.sh b/src/ubuntu-debootstrap/build-livecd.sh new file mode 100755 index 0000000..6d9ea12 --- /dev/null +++ b/src/ubuntu-debootstrap/build-livecd.sh @@ -0,0 +1,184 @@ +#/bin/sh + +# ubuntu live environment build script +# + ZFS +# + +echo "This script is in development! please read source." +exit + +set -e + +export _PATH="$(pwd)" +export ARCH=amd64 +export RELEASE='bionic' +export RELEASE_VERISON='18.04' +export IMAGE_NAME='x' +export RELEASE_ID='9D1A0061' + +_install_depends() { + sudo apt-get install debootstrap syslinux squashfs-tools genisoimage netpbm +} + +_setup_chroot() { + mkdir -v -p "$_PATH/build/chroot" + sudo debootstrap --arch=$ARCH $RELEASE "$_PATH/build/chroot" +} + +_bind_dev() { + sudo mount --bind /dev "$_PATH/build/chroot/dev" +} + +_unbind_dev() { + sudo umount "$_PATH/build/chroot/dev" +} + +_setup_inet() { + sudo cp -v /etc/hosts "$_PATH/build/chroot/etc/hosts" + sudo cp -v /etc/resolv.conf "$_PATH/build/chroot/etc/resolv.conf" + sudo cp -v /etc/apt/sources.list "$_PATH/build/chroot/etc/apt/sources.list" +} + +_bug_430224() +{ # service running in chroot issue-upstart + ln -v -s /bin/true /sbin/initctl +} + +_setup_env() { + _bind_dev + # backup initctl + cp -v "$_PATH/build/chroot/sbin/initctl" "$_PATH/cfg/initctl.backup" + + # login to chroot environment 
+ sudo chroot chroot + + # mount sys paths + mount none -v -t proc /proc + mount none -v -t sysfs /sys + mount none -v -t devpts /dev/pts + + # environment variables + export HOME=/root + export LC_ALL=C + + # apt + apt-key adv --keyserver keyserver.ubuntu.com --recv-keys $RELEASE_ID + apt-get update + apt-get install --yes dbus + dbus-uuidgen > /var/lib/dbus/machine-id + dpkg-divert --local --rename --add /sbin/initctl + + _bug_430224 + + apt-get install --yes ubuntu-standard casper lupin-casper + apt-get install --yes discover laptop-detect os-prober + apt-get install --yes linux-generic + apt-get install --yes ubiquity-frontend-gtk + + # Cleanup + rm /var/lib/dbus/machine-id + + # remove initctl diversion + rm /sbin/initctl + dpkg-divert --rename --remove /sbin/initctl + + if [ -f "/sbin/initctl" ]; then + read -p "restore initctl." + exit + fi + + # remove old kernal + ls -v /boot/vmlinuz-2.6.**-**-generic > old-kernal + sum=$(cat old-kernal | grep '[^ ]' | wc -l) + + if [ $sum -gt 1 ]; then + dpkg -l 'linux-*' | sed '/^ii/!d;/'"$(uname -r | sed "s/\(.*\)-\([^0-9]\+\)/\1/")"'/d;s/^[^ ]* [^ ]* \([^ ]*\).*/\1/;/[0-9]/!d' | xargs sudo apt-get -y purge + fi + + rm -v old-kernal + + # cleanup and unmounts + apt-get clean + rm -rf /tmp/* + + rm /etc/resolv.conf + + umount -lf /proc + umount -lf /sys + umount -lf /dev/pts + read -p "finished." + exit + read -p "exited." 
+} + +_build_image() { + mkdir -v -p "$_PATH/image/"{casper,isolinux,install} + + # kernal/initrd + cp "$_PATH/build/chroot/boot/vmlinuz-2.6.**-**-generic" "$_PATH/image"/casper/vmlinuz + cp "$_PATH/build/chroot/boot/initrd.img-2.6.**-**-generic" "$_PATH/image"/casper/initrd.lz + + # isolinux + cp /usr/lib/ISOLINUX/isolinux.bin "$_PATH/image"/isolinux/ + cp /usr/lib/syslinux/modules/bios/ldlinux.c32 "$_PATH/image"/isolinux/ + + # memtest + cp /boot/memtest86+.bin "$_PATH/image"/install/memtest + + # boot-time instructions + cp -v "$_PATH/cfg/isolinux.txt" "$_PATH/image/isolinux/" + + # boot loader configuration + cp -v "$_PATH/cfg/isolinux.cfg" "$_PATH/image/isolinux/" + + # compress chroot + sudo mksquashfs chroot "$_PATH/image/casper/filesystem.squashfs" + + # filesize + printf $(sudo du -sx --block-size=1 "$_PATH/build/chroot" | cut -f1) > "$_PATH/image/casper/filesystem.size" + + # create disk defines + cp -v "$_PATH/cfg/README.diskdefines" "$_PATH/image/README.diskdefines" + + # cfg ubuntu remix + touch "$_PATH/image/ubuntu" + + mkdir -v "$_PATH/image/.disk" + touch "$_PATH/image/base_installable" + echo "full_cd/single" > "$_PATH/image/cd_type" + echo "Ubuntu Remix $RELEASE_VERSION" > "$_PATH/image/info" + echo "$RELEASE_URL" > "$_PATH/image/release_notes_url" + + # calc md5 + sudo -s + (find "$_PATH/image/" -type f -print0 | xargs -0 md5sum | grep -v "\./md5sum.txt" > "$_PATH/image/md5sum.txt") + exit + + # create iso + sudo mkisofs -r -V "$IMAGE_NAME" \ + -cache-inodes -J -l \ + -b "$_PATH/image/isolinux/isolinux.bin" \ + -c "$_PATH/image/isolinux/boot.cat \ + -no-emul-boot \ + -boot-load-size 4 \ + -boot-info-table \ + -o "$_PATH/image/ubuntu-remix.iso "$_PATH/image" +} + +if [ -d "$_PATH/build"]; then + echo 'build exists.' 
+ exit +else + _setup_chroot + _bind_dev + _setup_inet + _setup_env + _unbind_dev +fi + +read -p "Build image" + +if [ "$REPLY" == 'yes']; then + _build_image +fi + diff --git a/src/ubuntu-debootstrap/chroot_mount b/src/ubuntu-debootstrap/chroot_mount new file mode 100644 index 0000000..93d1477 --- /dev/null +++ b/src/ubuntu-debootstrap/chroot_mount @@ -0,0 +1,3 @@ +mount --rbind /dev /mnt/dev +mount --rbind /proc /mnt/proc +mount --rbind /sys /mnt/sys diff --git a/src/ubuntu-debootstrap/chroot_unmount b/src/ubuntu-debootstrap/chroot_unmount new file mode 100644 index 0000000..21f8ef1 --- /dev/null +++ b/src/ubuntu-debootstrap/chroot_unmount @@ -0,0 +1,3 @@ + +mount | grep -v zfs | tac | awk '/\/mnt/ {print $3}' | xargs -i{} umount -lf {} +zpool export $ZPOOL_NAME diff --git a/src/ubuntu-debootstrap/stage-1_setup b/src/ubuntu-debootstrap/stage-1_setup new file mode 100644 index 0000000..5ce2eb4 --- /dev/null +++ b/src/ubuntu-debootstrap/stage-1_setup @@ -0,0 +1,5 @@ + +apt-add-repository universe +apt update + +apt install --yes debootstrap gdisk zfs-initramfs mdadm vim diff --git a/src/ubuntu-debootstrap/stage-2_setup-zfs b/src/ubuntu-debootstrap/stage-2_setup-zfs new file mode 100644 index 0000000..dc49d9d --- /dev/null +++ b/src/ubuntu-debootstrap/stage-2_setup-zfs @@ -0,0 +1,41 @@ +#!/bin/sh + +set -x +set -e + +# Format disk +#ls /dev/disk/by-id/ + +export DISK_ID=ata-HGST_HTS721010A9E630_JR1000BN2PDN3E +export ZPOOL_NAME=nomad + +sgdisk --zap-all "/dev/disk/by-id/${DISK_ID}" + +# Run this if you need legacy (BIOS) booting: +#sgdisk -a1 -n2:34:2047 -t2:EF02 /dev/disk/by-id/scsi-SATA_disk1 + +#Run this for UEFI booting (for use now or in the future): +sgdisk -n3:1M:+512M -t3:EF00 "/dev/disk/by-id/$DISK_ID" +sgdisk -n1:0:0 -t1:BF01 "/dev/disk/by-id/$DISK_ID" + +zpool create -f -o ashift=12 -O atime=off -O canmount=off -O compression=lz4 -O normalization=formD -O xattr=sa -O mountpoint=/ -R /mnt nomad /dev/disk/by-id/ata-HGST_HTS721010A9E630_JR1000BN2PDN3E-part1 
+ +zfs create -o canmount=off -o mountpoint=none $ZPOOL_NAME/ROOT + +zfs create -o canmount=noauto -o mountpoint=/ $ZPOOL_NAME/ROOT/ubuntu +zfs mount $ZPOOL_NAME/ROOT/ubuntu + +zfs create -o setuid=off $ZPOOL_NAME/home +zfs create -o mountpoint=/root $ZPOOL_NAME/home/root +zfs create -o canmount=off -o setuid=off -o exec=off $ZPOOL_NAME/var +zfs create -o com.sun:auto-snapshot=false $ZPOOL_NAME/var/cache +zfs create -o acltype=posixacl -o xattr=sa $ZPOOL_NAME/var/log +zfs create $ZPOOL_NAME/var/spool +zfs create -o com.sun:auto-snapshot=false -o exec=on $ZPOOL_NAME/var/tmp + +zfs create -o com.sun:auto-snapshot=false \ + -o mountpoint=/var/lib/docker $ZPOOL_NAME/var/docker + +zfs create -o com.sun:auto-snapshot=false \ + -o setuid=off $ZPOOL_NAME/tmp +chmod 1777 /mnt/tmp diff --git a/src/ubuntu-debootstrap/stage-3_debootstrap b/src/ubuntu-debootstrap/stage-3_debootstrap new file mode 100644 index 0000000..05c0127 --- /dev/null +++ b/src/ubuntu-debootstrap/stage-3_debootstrap @@ -0,0 +1,3 @@ +chmod 1777 /mnt/var/tmp +debootstrap bionic /mnt +zfs set devices=off $ZPOOL_NAME diff --git a/src/ubuntu-debootstrap/stage-4_config-system b/src/ubuntu-debootstrap/stage-4_config-system new file mode 100644 index 0000000..a275336 --- /dev/null +++ b/src/ubuntu-debootstrap/stage-4_config-system @@ -0,0 +1,41 @@ +read -p "HOSTNAME=" HOSTNAME + +echo "$HOSTNAME" > /mnt/etc/hostname + +cat << EOF >> "/mnt/etc/hosts" +127.0.0.1 $HOSTNAME +EOF + +#ip addr show + +#read -p "NET_NAME=" NET_NAME + +#cat < "/mnt/etc/netplan/$NET_NAME.yml" +#network: +# version: 2 +# ethernets: +# $NET_NAME: +# dhcp4: true +#EOF + +cat << EOF > "/mnt/etc/netplan/01-netcfg.yaml" +network: + version: 2 + renderer: NetworkManager +EOF + +cat < /mnt/etc/apt/sources.list +deb http://archive.ubuntu.com/ubuntu bionic main universe +deb-src http://archive.ubuntu.com/ubuntu bionic main universe + +deb http://security.ubuntu.com/ubuntu bionic-security main universe +deb-src http://security.ubuntu.com/ubuntu 
bionic-security main universe + +deb http://archive.ubuntu.com/ubuntu bionic-updates main universe +deb-src http://archive.ubuntu.com/ubuntu bionic-updates main universe +EOF + +cat /mnt/etc/netplan/$NET_NAME.yml +cat /mnt/etc/apt/sources.list + + diff --git a/src/ubuntu-debootstrap/stage-5_chroot_setup b/src/ubuntu-debootstrap/stage-5_chroot_setup new file mode 100644 index 0000000..233de1a --- /dev/null +++ b/src/ubuntu-debootstrap/stage-5_chroot_setup @@ -0,0 +1,62 @@ +#!/bin/sh + +# Chroot into system and configure basic system + +#chroot /mnt /bin/bash --login + +DISK_ID=ata-HGST_HTS721010A9E630_JR1000BN2PDN3E +ZPOOL_NAME=nomad + +ln -s /proc/self/mounts /etc/mtab +apt update + +dpkg-reconfigure locales LANG=en_US.UTF-8 + +# echo "America/Eastern" | sudo tee /etc/timezone +#sudo dpkg-reconfigure --frontend noninteractive tzdata + +dpkg-reconfigure tzdata +apt install --yes --no-install-recommends linux-image-generic +apt install --yes zfs-initramfs + +# Install Grub +apt install dosfstools +mkdosfs -F 32 -n EFI "/dev/disk/by-id/${DISK_ID}-part3" +mkdir /boot/efi +echo PARTUUID=$(blkid -s PARTUUID -o value \ + /dev/disk/by-id/${DISK_ID}-part3) \ + /boot/efi vfat noatime,nofail,x-systemd.device-timeout=1 0 1 >> /etc/fstab +mount /boot/efi +apt install --yes grub-efi-amd64 + +# Setup system groups +addgroup --system lpadmin +addgroup --system sambashare +passwd + +# Fix mount ordering +zfs set mountpoint=legacy $ZPOOL_NAME/var/log +zfs set mountpoint=legacy $ZPOOL_NAME/var/tmp +cat << EOF >> /etc/fstab +$ZPOOL_NAME/var/log /var/log zfs noatime,nodev,noexec,nosuid 0 0 +$ZPOOL_NAME/var/tmp /var/tmp zfs noatime,nodev,nosuid 0 0 +EOF + +zfs set mountpoint=legacy $ZPOOL_NAME/tmp +cat << EOF >> /etc/fstab +$ZPOOL_NAME/tmp /tmp zfs noatime,nodev,nosuid 0 0 +EOF + +grub-probe / +#test for zfs + +update-initramfs -u -k all +update-grub + +grub-install --target=x86_64-efi --efi-directory=/boot/efi \ + --bootloader-id=ubuntu --recheck --no-floppy + +ls /boot/grub/*/zfs.mod 
+#check zfs module + +zfs snapshot $ZPOOL_NAME/ROOT/ubuntu@install diff --git a/src/ubuntu-debootstrap/stage-6_user_setup b/src/ubuntu-debootstrap/stage-6_user_setup new file mode 100644 index 0000000..7318a6e --- /dev/null +++ b/src/ubuntu-debootstrap/stage-6_user_setup @@ -0,0 +1,10 @@ +ZPOOL_NAME=nomad +USER_NAME=i + +zfs create $ZPOOL_NAME/home/$USER_NAME +adduser $USER_NAME +cp -a /etc/skel/.[!.]* /home/$USER_NAME +chown -R $USER_NAME:$USER_NAME /home/$USER_NAME + +usermod -a -G adm,cdrom,dip,lpadmin,plugdev,sambashare,sudo $USER_NAME + diff --git a/src/ubuntu-debootstrap/stage-7_swap b/src/ubuntu-debootstrap/stage-7_swap new file mode 100644 index 0000000..3f49d53 --- /dev/null +++ b/src/ubuntu-debootstrap/stage-7_swap @@ -0,0 +1,12 @@ +ZPOOL_NAME=nomad +ZPOOL_SWAP_SIZE='12G' +zfs create -V $ZPOOL_SWAP_SIZE -b $(getconf PAGESIZE) -o compression=zle \ + -o logbias=throughput -o sync=always \ + -o primarycache=metadata -o secondarycache=none \ + -o com.sun:auto-snapshot=false $ZPOOL_NAME/swap + +mkswap -f /dev/zvol/$ZPOOL_NAME/swap +echo /dev/zvol/$ZPOOL_NAME/swap none swap defaults 0 0 >> /etc/fstab +echo RESUME=none > /etc/initramfs-tools/conf.d/resume + +swapon -aV diff --git a/src/ubuntu-debootstrap/stage-8_pkg b/src/ubuntu-debootstrap/stage-8_pkg new file mode 100644 index 0000000..47f47d8 --- /dev/null +++ b/src/ubuntu-debootstrap/stage-8_pkg @@ -0,0 +1,4 @@ +apt dist-upgrade --yes +apt install --yes ubuntu-standard +apt install vim tmux i3 tilda git +apt install --yes xorg network-manager diff --git a/src/ubuntu-debootstrap/zpool_import b/src/ubuntu-debootstrap/zpool_import new file mode 100644 index 0000000..b18adf7 --- /dev/null +++ b/src/ubuntu-debootstrap/zpool_import @@ -0,0 +1,4 @@ +zpool export -a +zpool import -N -R /mnt nomad +zfs mount nomad/ROOT/ubuntu +zfs mount -a diff --git a/src/zfs-install.sh b/src/zfs-install.sh new file mode 100755 index 0000000..2158039 --- /dev/null +++ b/src/zfs-install.sh @@ -0,0 +1,574 @@ +#!/bin/bash + +# Author: 
Shaun Lloyd +# License: MIT +# Version: 0.7 +# About: Basic script for the installation of zfs as a root filesytem. +# Features: With help from vagrant,virtualbox,packer and a few others +# - install linux with zfs as root filesystem. +# - test zpool layouts +# +# FIXME grub-install fail. +# TODO Better disk menus. +# TODO stable vagrant builds. +# TODO fix --dry-run, --silent, --debug. zfs commands need to us _exec. +# TODO add _read() +# - Include timeouts, defaults, thresholds, warnings. +# TODO fix _echo() +# - formatting +# - add styles +# - add background +# TODO - add /bin, /pkg +# - add pkg-dev to vagrant + +set -e + +# root privilages required +[ "$UID" -eq 0 ] || exec sudo "$0" "$@" + +msg() +{ # standard message interface style copied from packer. + # msg + # opt: + # -e | --error Error + # -c | --cmd _exec stdout + # -d | --debug Debug + # -i | --info Notice + # -q | --question Question + + _TEXT='==> ' + _STYLE= + + while (( "$#" )); do + case "$1" in + -i | --info) _COLOR=yellow;; + -q | --question) _COLOR=green; _STYLE='--bold';; + -e | --error) _COLOR=red; _STYLE='--bold';; + -c | --command) _COLOR=blue; _STYLE='--bold';; + -d | --debug) _COLOR=purple; _STYLE='--bold';; + -*|--*=) msg -e "msg() unsupported flag $1";; + *) _TEXT+="$1 ";; + esac + shift + done + + case $_COLOR in + default) _COLOR='39';; + black) _COLOR='30';; white) _COLOR='97';; + red) _COLOR='31';; lred) _COLOR='91';; + green) _COLOR='32';; lgreen) _COLOR='92';; + yellow) _COLOR='33';; lyellow) _COLOR='93';; + blue) _COLOR='34';; lblue) _COLOR='94';; + purple) _COLOR='35';; lpurple) _COLOR='95';; + cyan) _COLOR='36';; lcyan) _COLOR='95';; + gray) _COLOR='37';; dgray) _COLOR='90';; + esac + case "$_STYLE" in + --bold) _STYLE='1;';; + esac + printf "\e[${_STYLE}${_COLOR}m${_TEXT}\e[0m\n" + +} + +_select_multi() +{ # _select_multi a b c ... 
+ # TODO add comments to list + options=("${@:2}") + + menu() { + for i in ${!options[@]}; do + printf "%3d [%s] %s\n" $((i+1)) "${choices[i]:- }" "${options[i]}" + done + } + + while menu && read -rp "? " num && [[ "$num" ]]; do + for i in `seq 1 $( expr ${#options[@]} + 1 )`; do tput cuu1; tput el; done + [[ "$num" != *[![:digit:]]* ]] && + (( num > 0 && num <= ${#options[@]} )) || { continue; } + ((num--)); msg="${options[num]} was ${choices[num]:+un}checked" + [[ "${choices[num]}" ]] && choices[num]="" || choices[num]="+" + done + + _select= + for i in ${!options[@]}; do + [[ "${choices[i]}" ]] && _select+="${options[i]} " + done + export "$1"="$_select" +} + +_exec() +{ # _exec + # opt: set via cli, --dry-run --debug -l -c + CMD="$@" + msg -c "$CMD" + if [ "$OPT_DEBUG" == True ]; then + read -e -p "$ " -i "$CMD" _CMD + if [ "$_CMD" != "$CMD" ]; then + msg -c "$_CMD" + CMD=$_CMD + fi + fi + if [ "$OPT_DRYRUN" != True ]; then + if [ "$OPT_LOG_FILE" != '' ]; then + echo -e "$ $CMD" >> "$OPT_LOG_FILE" + bash -c "$CMD" 2>&1 | tee -a "$OPT_LOG_FILE" + else + bash -c "$CMD" + fi + fi + if [ "$OPT_LOG_CMD" != '' ]; then + echo -e "$CMD" >> "$OPT_LOG_CMD" + fi +} + +opt_cmdline() +{ # parse zfs-install.sh arguments + while (( "$#" )); do + case "$1" in + -d | --debug) OPT_DEBUG=True;; + -l | --log) + OPT_LOG_FILE="$2" + if [ -f "$OPT_LOG_FILE" ]; then + msg -q "log file exists" + read -e -p "Append, Delete ? [a/d] " -i 'a' + [[ "$REPLY" == 'd' ]] && echo '' > "$OPT_LOG_FILE" + fi;; + -c | --log-cmd) + OPT_LOG_CMD="$2" + if [ -f "$OPT_LOG_CMD" ]; then + msg -q "command log file exists" + read -e -p "Delete ? [d] " -i "d" + if [ $REPLY == 'd' ]; then + rm "$OPT_LOG_CMD" + else + msg -e "please choose another filename." + exit 1 + fi + fi + shift;; + --dry-run) OPT_DRYRUN=True;; + --help) + echo -e "\nzfs-install.sh \n" + echo -e "\t-d | --debug Interactive command execution." + echo -e "\t-l | --log Log stdout." + echo -e "\t-c | --log-cmd Log cmd sequence." 
+ echo -e "\t--dry-run Process script but no command execution." + echo -e "\t--silent Fully automatic. ! NO IMPLEMENTED YET.\n"; + exit 1 + ;; + --password=*) OS_PASSWORD="${1#*=}";; + --disks=*) ZPOOL_DISKS="${1#*=}";; + --layout=*) ZPOOL_LAYOUT="${1#*=}";; + --root-ds=*) ZPOOL_ROOT_DS="${1#*=}";; + --root-fs=*) ZPOOL_ROOT_FS="${1#*=}";; + --pool-name=*) ZPOOL_POOL_NAME="${1#*=}";; + --swap) ZPOOL_SWAP=True;; + --swap-size=*) + if [ "${1#*=}" == '' ]; then + systemramk=$(free -m | awk '/^Mem:/{print $2}') + systemramg=$(echo "scale=2; $systemramk/1024" | bc) + suggestswap=$(printf %.$2f $(echo "scale=2; sqrt($systemramk/1024)" | bc)) + ZPOOL_SWAP_SIZE="$suggestswap" + else + ZPOOL_SWAP_SIZE="${1#*=}" + fi + ;; + -*|--*) msg -e "unsupported flag $1"; exit 1;; + esac + shift + done +} + +_zfs() +{ # basic zfs wrapper. + + # TODO better layout of partitions + # TODO basic used,avail stats per disk. + # TODO add support for VDEVS: file,mirror,raidz1/2/3,spare,cache,log + + case "$1" in + create) + # check options are set + [[ "${ZPOOL_LAYOUT}" == '' ]] && [[ "${ZPOOL_DISKS}" == '' ]] && disk select ZPOOL_DISKS + [[ "${ZPOOL_LAYOUT}" == '' ]] && _zfs layout ZPOOL_LAYOUT + [[ "${ZPOOL_ROOT_DS}" == '' ]] && read -e -p "ZPOOL_ROOT_DS=" ZPOOL_ROOT_DS + [[ "${ZPOOL_ROOT_FS}" == '' ]] && read -e -p "ZPOOL_ROOT_FS=" ZPOOL_ROOT_FS + [[ "${ZPOOL_POOL_NAME}" == '' ]] && read -ep "ZPOOL_POOL_NAME=" ZPOOL_POOL_NAME + + msg -i "ZPOOL: create" + msg -i "ZPOOL_LAYOUT=${ZPOOL_LAYOUT}" + msg -i "ZPOOL_ROOT_DS=${ZPOOL_ROOT_DS}" + msg -i "ZPOOL_ROOT_FS=${ZPOOL_ROOT_FS}" + msg -i "ZPOOL_POOL_NAME=${ZPOOL_POOL_NAME}" + msg -i "ZPOOL_SWAP=${ZPOOL_SWAP}" + msg -i "ZPOOL_SWAP_SIZE=${ZPOOL_SWAP_SIZE}" + + # create zpool + # FIXME need to get the boot partition ! 
+ _exec "zpool create \ + -o ashift=12 \ + -o altroot=/mnt \ + -O atime=off \ + -O relatime=on \ + -O compression=lz4 \ + -O mountpoint=/$ZPOOL_POOL_NAME \ + -m none $ZPOOL_POOL_NAME $ZPOOL_LAYOUT" + + # create filesystem dataset for the root filesystem + _exec "zfs create \ + -o mountpoint=none \ + $ZPOOL_POOL_NAME/$ZPOOL_ROOT_DS" + + # create boot environment + _exec "zfs create \ + -o mountpoint=/ \ + $ZPOOL_POOL_NAME/$ZPOOL_ROOT_DS/$ZPOOL_ROOT_FS" + + _exec "zpool set bootfs=$ZPOOL_POOL_NAME/$ZPOOL_ROOT_DS/$ZPOOL_ROOT_FS $ZPOOL_POOL_NAME" + + if [ "$ZPOOL_ZVOL_SWAP" == True ]; then + _exec "zfs create \ + -V ${ZPOOL_ZVOL_SWAP_SIZE}G \ + -b $(getconf PAGESIZE) \ + -o compression=zle \ + -o logbias=throughput \ + -o sync=always \ + -o primarycache=metadata \ + -o secondarycache=none \ + -o com.sun:auto-snapshot=false \ + $ZPOOL_POOL_NAME/swap" + _exec "sleep 1" + _exec "mkswap -f /dev/zvol/$ZPOOL_POOL_NAME/swap" + fi + + # create os datasets + _exec "zfs create \ + -o mountpoint=/home \ + $ZPOOL_POOL_NAME/home" + _exec "zfs create \ + -o mountpoint=/usr \ + $ZPOOL_POOL_NAME/$ZPOOL_ROOT_DS/$ZPOOL_ROOT_FS/usr" + _exec "zfs create \ + -o mountpoint=/var \ + $ZPOOL_POOL_NAME/$ZPOOL_ROOT_DS/$ZPOOL_ROOT_FS/var" + _exec "zfs create \ + -o mountpoint=/var/tmp \ + -o setuid=off \ + $ZPOOL_POOL_NAME/$ZPOOL_ROOT_DS/$ZPOOL_ROOT_FS/var/tmp" + _exec "zfs create \ + -o mountpoint=/tmp \ + -o setuid=off \ + $ZPOOL_POOL_NAME/tmp" + + _exec "zfs set mountpoint=legacy $ZPOOL_POOL_NAME/tmp" + + _exec "zpool export $ZPOOL_POOL_NAME" + + # import and create cache file + _exec "zpool import -R /mnt $ZPOOL_POOL_NAME" + _exec "mkdir -p /mnt/etc/zfs" + _exec "zpool set cachefile=/mnt/etc/zfs/zpool.cache $ZPOOL_POOL_NAME" + + # list zfs config + _exec "zpool get all $ZPOOL_POOL_NAME" + _exec "zfs list -t all -o name,type,mountpoint,compress,exec,setuid,atime,relatime" + ;; + layout) + # basic layout setup + [[ "${ZPOOL_DISKS}" == '' ]] && disk select + msg -i "ZPOOL: Layout Configuration" 
+ msg -i "TODO: Currently ZPOOL_LAYOUT is passed to zfs create."
+ msg -i "? mirror /dev/aaa /dev/bbb mirror /dev/ccc /dev/ddd"
+ read -e -p "ZPOOL_LAYOUT=" -i "${ZPOOL_DISKS}"
+ ;;
+ snapshot)
+ # basic snapshot support
+ # TODO recover first snapshot
+ # TODO setup auto snapshots on / /home etc
+
+ while true; do
+ read -p "Create snapshot ?" -i "y" -e yn
+ case $yn in
+ [Yy]* )
+ _exec "zfs snapshot ${ZPOOL_POOL_NAME}${ZPOOL_ROOT_FS}@install-pre-reboot"
+ break;;
+ [Nn]* ) break;;
+ * ) echo "Please answer yes or no.";;
+ esac
+ done
+ ;;
+ find)
+ # find existing zpools.
+ ZPOOL_EXISTS=False
+ ZPOOL_ROOT_DS_EXISTS=False
+ ZPOOL_ROOT_FS_EXISTS=False
+ ZPOOL_ROOT_DS_MOUNTED=False
+ ZPOOL_ROOT_FS_MOUNTED=False
+
+ zpool_list=$(zfs list | tail -n +2 )
+ if [ "$zpool_list" != '' ]; then
+ msg -i "ZPOOL: found."
+ ZPOOL_EXISTS=True
+ zpool list
+
+ zpool_root=$(zfs list / | tail -n +2)
+ if [ "$zpool_root" != '' ]; then
+ msg -i "ZPOOL: ROOT_FS found."
+ ZPOOL_ROOT_DS_EXISTS=True
+ zfs list /
+
+ zvol_root_mounted=$(zfs list -o mounted / | tail -n +2 | awk '{gsub(/ /, "", $0); print}')
+ if [ "$zvol_root_mounted" == 'yes' ]; then
+ ZPOOL_ROOT_DS_MOUNTED=True
+ msg -i "ZPOOL: ROOT_FS mounted."
+ msg -i "TODO live migration"
+ msg -i "TODO installation from ZFS active root"
+ msg -e "Active development, please see https://github.com/lloydie/zfs-install/issues/11"
+ else
+ msg -i "ZPOOL: ROOT_FS not mounted."
+ fi
+ fi
+ else
+ msg -i "ZPOOL: not found."
+ fi
+ ;;
+ esac
+}
+
+disk()
+{ # Basic physical / virtual disks config.
+ # TODO encryption
+ # FIXME disk destroy with fdisk, allow for non gpt partition drives.
+ + case "$1" in + destroy) + # destroy partition tables + msg -i "DISK: destroy ${ZPOOL_DISKS}" + _exec "sgdisk --zap-all ${ZPOOL_DISKS}" + ;; + format) + if [ -d /sys/firmware/efi ]; then + # uefi + _exec "sgdisk -n3:1M:+512M -t3:EF00 ${ZPOOL_DISKS}" + else + # bios + _exec "sgdisk -a1 -n2:34:2047 -t2:EF02 ${ZPOOL_DISKS}" + fi + + # unencrypted volume + _exec "sgdisk -n1:0:0 -t1:BF01 $ZPOOL_DISKS" + ;; + list) + msg -i "DISK: List block devices." + lsblk -flp -o name,uuid,label,type,fstype,size,mountpoint,model + ;; + select) + # return available list of available drives. + disk-part-drive() + { # hack to get drive a partition is on. + part=${1} + part=${part#/dev/} + disk=$(readlink /sys/class/block/$part) + disk=${disk%/*} + disk=/dev/${disk##*/} + echo $disk + } + + disk_root=$(disk-part-drive $(lsblk -lo name,uuid,mountpoint --noheadings | awk '$3 == "/" {print $1}')) + for disk_name in $(lsblk -dpl -o name,fstype --noheadings | awk -v disk_root="${disk_root}" '!/iso9660/ && $0!~disk_root {print}'); do + disk_list+="$disk_name " + done + + disk_count=$(echo "$disk_list" | awk '{print NF}') + if [ "$disk_count" == '1' ]; then + msg -i "DISK: Single disk found, auto format." + export "$2"="$disk_list" + else + msg -q "DISK: Please select disks for root pool ?" + _select_multi "$2" $disk_list + fi + ;; + esac +} + +os-detect() +{ # simple os detection, linux obviously for starters. + # TODO debian + # TODO gentoo + # TODO centos + # TODO rhel + # TODO coreos + # TODO linuxfromscratch + if [ -f "/etc/lsb-release" ]; then + OS_DISTRIBUTOR=$(lsb_release -si) + OS_RELEASE=$(lsb_release -sr) + OS_CODENAME=$(lsb_release -sc) + OS_DESCRIPTION=$(lsb_release -sd) + fi + if [ "$OS_CODENAME" == '' ]; then + msg -e "Unsupported host operating system." 
+ exit 1 + else + msg -i "OS_DISTRIBUTOR=$OS_DISTRIBUTOR" + msg -i "OS_CODENAME=$OS_CODENAME" + msg -i "OS_RELEASE=$OS_RELEASE" + msg -i "OS_DESCRIPTION=$OS_DESCRIPTION" + fi + [[ -d /sys/firmware/efi ]] && OS_BOOT_MODE=UEFI || OS_BOOT_MODE=BIOS +} + +os-install() +{ # Individual distribution installation + # os-install