From e9c42fb35279dad468466689648d7055388e2f5a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Antoine=20Beaupr=C3=A9?= <anarcat@debian.org>
Date: Tue, 1 Oct 2019 11:08:26 -0400
Subject: [PATCH] document some kvm stuff and lvm resizing

---
 tsa/howto/kvm.mdwn | 73 ++++++++++++++++++++++++++++++++++++++++++++++
 tsa/howto/lvm.mdwn | 72 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 145 insertions(+)
 create mode 100644 tsa/howto/kvm.mdwn

diff --git a/tsa/howto/kvm.mdwn b/tsa/howto/kvm.mdwn
new file mode 100644
index 00000000..ce852f9f
--- /dev/null
+++ b/tsa/howto/kvm.mdwn
@@ -0,0 +1,73 @@
+KVM is Linux's [Kernel-based Virtual Machine][] (not to be confused
+with a [KVM switch][]). It's the backing mechanism for our
+virtualization technologies. This page documents the internals of KVM
+and the configuration on some of our older nodes. Newer machines
+should be provisioned with [[ganeti]] on top of it, and most of the
+documentation here should not be necessary for day-to-day Ganeti
+operations.
+
+[KVM switch]: https://en.wikipedia.org/wiki/KVM_switch
+[Kernel-based Virtual Machine]: https://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine
+
+Tutorials
+=========
+
+Rebooting
+---------
+
+Rebooting should be done with a specific procedure, documented in [[upgrades]].
+
+Resizing disks
+---------------
+
+To resize a disk, you need to resize the [QCOW2][] image in the parent
+host.
+
+Before you do this, however, check whether there is still some wiggle
+room inside the guest itself, in the LVM physical volume; see the
+output of `pvs` and the [[lvm]] cheat sheet.
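+
+For example, a quick check inside the guest could look like this (the
+volume group and device names obviously vary per machine):
+
+    # inside the guest: look at the PFree / VFree columns for unallocated space
+    pvs
+    vgs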
+
+Once you are sure the image on the host really needs to grow, use the
+`qemu-img` command to resize it.
+
+For example, this will resize (grow!) the image to 50GB, assuming it
+was smaller before:
+
+    qemu-img resize /srv/vmstore/vineale.torproject.org/vineale.torproject.org-lvm 50G
+
+[QCOW2]: https://en.wikipedia.org/wiki/Qcow
+
+TODO: do we need to stop the host before this? How about
+repartitioning?
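+
+In any case, growing the image alone does not make the space usable
+inside the guest. A minimal sketch of the guest-side follow-up,
+assuming the whole virtual disk is used directly as an LVM physical
+volume (as with `/dev/sdb` in the example below):
+
+    # inside the guest, once the kernel sees the larger disk
+    # (rebooting the guest is one way to make sure it does)
+    pvresize /dev/sdb
+    # then grow the logical volume and filesystem, see [[lvm]]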
+
+To *shrink* an image, you need to pass the `--shrink` option to
+`qemu-img resize`, but be careful: the underlying partitions and
+filesystems need to be shrunk first, otherwise you will lose data.
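+
+A rough sketch of that order, assuming the layout from the example
+below (an ext4 filesystem on `vg_vineale/srv`, on a physical volume
+that spans the whole virtual disk) and sizes picked purely for
+illustration:
+
+    # inside the guest: shrink from the top of the stack down,
+    # double-checking the sizes at every step
+    umount /srv
+    fsck -f /dev/mapper/vg_vineale-srv
+    resize2fs /dev/mapper/vg_vineale-srv 25G
+    lvreduce -L 25G vg_vineale/srv
+    pvresize --setphysicalvolumesize 30G /dev/sdb
+    # only then, on the parent host, shrink the image itself
+    qemu-img resize --shrink /srv/vmstore/vineale.torproject.org/vineale.torproject.org-lvm 30G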
+
+Design
+======
+
+Disk allocation
+---------------
+
+Disks are allocated on an as-needed basis on the KVM host, under
+`/srv/vmstore`. Each disk is a file on the host filesystem, on top of
+which the guest can create its own partitions. Here is, for example,
+`vineale`'s disk, which is currently taking 29GB:
+
+    root@vineale:/srv# df -h /srv
+    Filesystem                 Size    Used Avail Use% Mounted on
+    /dev/mapper/vg_vineale-srv    35G     29G  4,4G  87% /srv
+
+On the parent host, it looks like this:
+
+    root@macrum:~# du -h /srv/vmstore/vineale.torproject.org/vineale.torproject.org-lvm
+    29G	/srv/vmstore/vineale.torproject.org/vineale.torproject.org-lvm
+
+i.e. only 29GB is in use. You can also see there is a layer of LVM
+volumes inside the guest, so the actual allocation is 40GB:
+
+    root@vineale:/srv# pvs
+      PV         VG         Fmt  Attr PSize  PFree
+      /dev/sdb   vg_vineale lvm2 a--  40,00g 5,00g
+
+That 40GB size is allocated inside the `QCOW2` image.
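+
+The virtual size and the actual space used can also be checked from
+the parent host with `qemu-img info`:
+
+    # on the parent host
+    qemu-img info /srv/vmstore/vineale.torproject.org/vineale.torproject.org-lvm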
diff --git a/tsa/howto/lvm.mdwn b/tsa/howto/lvm.mdwn
index 60b6c721..3678de61 100644
--- a/tsa/howto/lvm.mdwn
+++ b/tsa/howto/lvm.mdwn
@@ -28,3 +28,75 @@ setup caching
     lvconvert --type cache-pool --cachemode writethrough --poolmetadata "$vg"/srv-cache-meta "$vg"/srv-cache
 
     lvconvert --type cache --cachepool "$vg"/srv-cache "$vg"/srv
+
+# Resizing
+
+Assume we want to grow this logical volume to take up the available
+free space in the PV:
+
+    root@vineale:/srv# lvs
+      LV   VG         Attr       LSize  Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
+      srv  vg_vineale -wi-ao---- 35,00g                                                    
+    root@vineale:/srv# pvs
+      PV         VG         Fmt  Attr PSize  PFree
+      /dev/sdb   vg_vineale lvm2 a--  40,00g 5,00g
+    root@vineale:~# pvdisplay 
+      --- Physical volume ---
+      PV Name               /dev/sdb
+      VG Name               vg_vineale
+      PV Size               40,00 GiB / not usable 4,00 MiB
+      Allocatable           yes 
+      PE Size               4,00 MiB
+      Total PE              10239
+      Free PE               1279
+      Allocated PE          8960
+      PV UUID               CXKO15-Wze1-xY6y-rOO6-Tfzj-cDSs-V41mwe
+
+To grow the logical volume and its filesystem to take up all available
+free space, you should do the following:
+
+ 1. stop services and processes using the partition (will obviously vary):
+ 
+        service apache2 stop
+
+ 2. unmount the filesystem:
+ 
+        umount /srv
+
+ 3. check the filesystem:
+ 
+        fsck -y -f /dev/mapper/vg_vineale-srv
+
+ 4. extend the logical volume, using the extent notation to take up
+    all available free space (see also the shortcut sketched after
+    this list):
+
+        lvextend vg_vineale/srv -l +1279
+
+ 5. grow the filesystem:
+ 
+        resize2fs -p /dev/mapper/vg_vineale-srv
+
+ 6. recheck the filesystem:
+ 
+        fsck  -f -y /dev/mapper/vg_vineale-srv
+
+ 7. remount the filesystem and start processes:
+ 
+        mount /srv
+        service apache2 start
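+
+As a possible shortcut, recent versions of lvm2 can resize the
+filesystem together with the logical volume, and the `+100%FREE`
+extent notation avoids having to look up the exact number of free
+extents. A rough equivalent of steps 4 and 5 above, sketched here but
+not tested:
+
+    lvextend -r -l +100%FREE vg_vineale/srv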
+
+Note: this assumes there is free space in the physical volume. If
+there isn't, you will need to add a disk to the volume group as a new
+physical volume. For example:
+
+    pvcreate /dev/md123
+    vgextend vg_vineale /dev/md123
+
+If the underlying disk itself was grown, which happens in virtual
+hosting environments, you can instead just extend the existing
+physical volume to fill it:
+
+    pvresize /dev/sdb
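+
+Either way, a quick look at `pvs` and `vgs` afterwards should confirm
+the extra free space before re-running the `lvextend` steps above (the
+volume group name follows the earlier examples):
+
+    pvs
+    vgs vg_vineale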
+
+See also the [upstream documentation][].
+
+[upstream documentation]: http://www.tldp.org/HOWTO/LVM-HOWTO/extendlv.html
-- 
GitLab