This commit is contained in:
Andrew Wang 2021-09-17 16:42:15 -04:00
parent a1da443418
commit a3e11f1208
9 changed files with 122 additions and 85 deletions

View File

@ -47,12 +47,14 @@ ansible-playbook qemu-vm.yml
## VM Install Option 2 (kvm)
### Install Packages (debian)
**needs update**
```
$ apt install qemu-kvm libvirt-daemon virt-manager virt-viewer ansible cloud-image-utils
qemu-kvm libvirt-clients libvirt-daemon-system bridge-utils virtinst libvirt-daemon virt-manager
```
### Install Packages (archlinux)
**needs update**
```
$ pacman -S qemu libvirt virt-install virt-viewer ansible
```
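
After installing, it may be worth a quick sanity check that the libvirt daemon is running and reachable before starting any playbooks (a suggested verification, not part of the original instructions):
```
$ systemctl enable --now libvirtd
$ virsh list --all
```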

View File

@ -2,21 +2,9 @@
- name: setup mirror vm
hosts: 127.0.0.1
tasks:
- name: ubuntu iso exists
stat:
path: "{{ playbook_dir }}/vm/ubuntu20_04.iso"
register: iso_exists
ignore_errors: true
- name: ubuntu iso seed exists
stat:
path: "{{ playbook_dir }}/vm/seed.qcow2"
register: seed_exists
ignore_errors: true
- name: mirror vm exists
command: "virsh dumpxml mirror > /dev/null 2>&1"
register: vm_exists
- name: mirbr0 network exists
command: "virsh net-dumpxml mirbr0 > /dev/null 2>&1"
register: net_exists
ignore_errors: true
- name: storage pool exists
@ -24,40 +12,37 @@
register: pool_exists
ignore_errors: true
- name: mirbr0 network exists
command: "virsh net-dumpxml mirbr0 > /dev/null 2>&1"
register: net_exists
- name: mirror vm exists
command: "virsh dumpxml mirror > /dev/null 2>&1"
register: vm_exists
ignore_errors: true
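
One caveat with the existence probes above: the `command` module does not go through a shell, so `> /dev/null 2>&1` is handed to `virsh` as literal arguments instead of discarding output. A redirect-free sketch of the same check (task name taken from above; `failed_when`/`changed_when` are a suggested alternative to `ignore_errors`):
```yaml
- name: mirror vm exists
  command: virsh dumpxml mirror
  register: vm_exists
  failed_when: false   # a missing VM is data for the later `when:`, not a failure
  changed_when: false  # probing never changes state
```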
- name: create directory for vm
file:
path: "{{ playbook_dir }}/vm"
path: "{{ playbook_dir }}/vm/disks"
state: directory
recurse: yes
- name: fetch ubuntu iso
get_url:
url: https://releases.ubuntu.com/20.04.3/ubuntu-20.04.3-live-server-amd64.iso
dest: "{{ playbook_dir }}/vm/ubuntu20_04.iso"
when: iso_exists.stat.exists == false
# requires: cloud-localds (cloud-image-utils)
# Installing VMs from Ready Images
# https://www.x386.xyz/index.php/2021/01/06/kvm-on-ubuntu-server-1/
# apply network config
# --network-config {{ playbook_dir }}/templates/network
- name: create ubuntu iso seed
command: >
command:
cmd: >
cloud-localds
--network-config {{ playbook_dir }}/templates/network
{{ playbook_dir }}/vm/seed.qcow2
{{ playbook_dir }}/templates/user-data
when: seed_exists.stat.exists == false
creates: "{{ playbook_dir }}/vm/seed.qcow2"
# get the user to set net.ipv4.ip_forward = 1?
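
The `net.ipv4.ip_forward` question in the comment above could be handled by the play itself rather than the user; a minimal sketch using the `ansible.posix.sysctl` module (assumes the `ansible.posix` collection is installed and the play can escalate to root):
```yaml
- name: enable ipv4 forwarding for the bridge network
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: '1'
    sysctl_set: yes   # also set the live value, not just the config file
    state: present
  become: yes
```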
- name: create mirbr0 bridge network
command: "virsh {{ item }}"
with_items:
loop:
- net-define {{ playbook_dir }}/templates/network.xml
- net-autostart mirbr0
- net-start mirbr0
@ -65,25 +50,25 @@
- name: create storage pool
command: "virsh {{ item }}"
with_items:
- pool-define-as mirror dir --target="{{ playbook_dir }}/vm/"
loop:
- pool-define-as mirror dir --target="{{ playbook_dir }}/vm/disks"
- pool-build mirror
- pool-start mirror
- pool-autostart mirror
when: not pool_exists.rc == 0
# any way to check for and skip virtual disks that are already created?
# for now just hardcoding the size as 10G, since different values would mean modifying user-data
# could just check for each whether it is already created
# hardcoded to 10G since changing it would also require changes to user-data
- name: create virtual disks
command: "virsh vol-create-as mirror {{ item }}"
with_items:
- mirror_root1.qcow2 10G
- mirror_root2.qcow2 10G
- mirror_disk1.qcow2 10G
- mirror_disk2.qcow2 10G
- mirror_disk3.qcow2 10G
- mirror_disk4.qcow2 10G
command:
cmd: "virsh vol-create-as mirror {{ item.name }} {{ item.size }}"
creates: "{{ playbook_dir }}/vm/disks/{{ item.name }}"
loop:
- { name: mirror_root1.qcow2, size: 10G }
- { name: mirror_root2.qcow2, size: 10G }
- { name: mirror_disk1.qcow2, size: 10G }
- { name: mirror_disk2.qcow2, size: 10G }
- { name: mirror_disk3.qcow2, size: 10G }
- { name: mirror_disk4.qcow2, size: 10G }
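
With `creates` pointing at each volume's backing file, every iteration is idempotent: `virsh vol-create-as` only runs when the file is missing, which answers the old check/skip question above. The result can be confirmed manually (not part of the playbook):
```
$ virsh vol-list mirror
```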
- name: create vm
command: >
@ -111,13 +96,25 @@
# copy over pub key into /root/.ssh/authorized_keys
# add line to ssh config that allows ssh as root
# possible that these will not be run in order?
# roles are called relative to playbook
- name: setup mirror services
hosts: 192.168.123.2
include_role: "../roles/{{ item }}"
with_items:
- zfs
# - name: setup mirror services
# hosts: 192.168.123.2
# include_role: "../roles/{{ item }}"
# loop:
# - zfs (make sure it runs first)
# - index
# - nginx
# - rsync
# - ftp
# to write
# - merlin
# - scripts (in bin)
# - mirrormanager
# - users (make users + group + ssh conf + ssh pub key) (make sure it runs second)
# maybe replace ubuntu user with local user
# users: mirror, local, push
# ssh: allow user to login as root
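
The users notes above sketch out to roughly the following role tasks (hypothetical; the user names come from the comments, while the key path, shell, and `PermitRootLogin` policy are assumptions):
```yaml
- name: create mirror users
  user:
    name: "{{ item }}"
    shell: /bin/bash   # assumed
    create_home: yes   # avoids creating /home/mirror by hand in the index role
  loop: [mirror, local, push]

- name: copy pub key into root's authorized_keys
  ansible.posix.authorized_key:
    user: root
    key: "{{ lookup('file', 'files/mirror.pub') }}"  # hypothetical key location

- name: allow ssh login as root
  lineinfile:
    path: /etc/ssh/sshd_config
    regexp: '^#?PermitRootLogin'
    line: PermitRootLogin prohibit-password
  notify: restart ssh  # assumes a matching handler
```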

View File

@ -3,8 +3,21 @@
copy:
src: "{{role_path}}/templates/csc-mirror"
dest: /etc/cron.d/csc-mirror
# restart cron
# does the mirror user exist / does the home dir exist?
# make sure that each role can run on its own or in any order (can assume the zfs + users roles run first)
# remove this if the user is created with a home dir
- name: mirror home
file:
path: /home/mirror
state: directory
# owner: mirror
# group: mirror
mode: 0755
recurse: yes
# user template module instead
# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/template_module.html
- name: Copy index files
copy:
src: "{{role_path}}/templates/mirror-index"

View File

@ -1,14 +0,0 @@
```
csc-mirror ->
/etc/cron.d/csc-mirror
```
make the `/home/mirror` dir
```
mirror-index/ ->
/home/mirror/mirror-index/
```
```
include/ ->
/mirror/root/include/
```

View File

@ -1,12 +1,9 @@
# /etc/cron.d/csc-mirror: mirror cron jobs
MAILTO=ztseguin@csclub.uwaterloo.ca
# MAILTO=ztseguin@csclub.uwaterloo.ca
# m h dom mon dow user command
# UPS Health
* * * * * mirror /usr/bin/flock -w 0 /tmp/ups-status.lock /bin/bash -c "/usr/local/bin/ups-status >/mirror/root/ups 2>/dev/null" 2>/dev/null
# reprepro incoming
*/10 * * * * root /srv/debian/bin/rrr-incoming cron

View File

@ -10,6 +10,7 @@
name: nginx
state: started
# not great if the user already has the correct configs
- name: Remove pre-existing sites-available and sites-enabled
file:
state: absent
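
One way to soften the caveat in the comment above: rather than deleting `sites-available`/`sites-enabled` outright, deploy the desired config with `template`, which leaves correct configs untouched and only reports a change when content actually differs (a sketch; file names assumed):
```yaml
- name: install mirror site config
  template:
    src: mirror.conf.j2                        # hypothetical template
    dest: /etc/nginx/sites-enabled/mirror.conf
  notify: restart nginx                        # assumes a matching handler
```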

View File

@ -17,7 +17,7 @@
owner: root
group: root
mode: 0644
with_items:
loop:
- { src: "rsyncd.conf.j2", dest: "rsyncd.conf" }
- { src: "rsyncd-filter.conf.j2", dest: "rsyncd-filter.conf" }
notify: restart rsync
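
The `notify: restart rsync` above implies a handler elsewhere in the role, presumably along these lines (a sketch; the Debian service name `rsync` is assumed):
```yaml
# handlers/main.yml
- name: restart rsync
  service:
    name: rsync
    state: restarted
```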

View File

@ -4,6 +4,29 @@
state: latest
update_cache: true
# in hosts/group_vars we can do
# vars:
# disks:
# - /dev/vdc
# - /dev/vdd
# - /dev/vde
# - /dev/vdf
# disk_arg: "{{ disks | join(' ') }}"
# hardcoded for now
# may be better to use the /dev/disk/by-id/... instead
- name: disks to be used for zpool
set_fact:
disks:
- /dev/vdc
- /dev/vdd
- /dev/vde
- /dev/vdf
- name: concatenate disks
set_fact:
disk_arg: "{{ disks | join(' ') }}"
- name: create zfs mountpoint
file:
path: /mirror/root/.cscmirror
@ -13,10 +36,20 @@
mode: 0777
recurse: yes
- name: zpool exists
command: "zpool status cscmirror > /dev/null 2>&1"
register: zpool_exists
ignore_errors: true
- name: create and mount zpool
# double check this works (and produces the correct result)
# may be better to use the /dev/disk/by-id/... instead
command: zpool create -f -m /mirror/root/.cscmirror cscmirror raidz2 /dev/vdc /dev/vdd /dev/vde /dev/vdf
# may need to mount it (also check that it will automount on boot)
command: >
zpool create
-m /mirror/root/.cscmirror
cscmirror
raidz2
{{ disk_arg }}
when: not zpool_exists.rc == 0
# mount all zpools
# zfs mount -a
# need to check whether the zpool already exists
# may need to mount it (also check that it will automount on boot)
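
The `zpool status` probe has the same shell-redirection issue as the virsh checks: `command` passes `> /dev/null 2>&1` to `zpool` as literal arguments. A redirect-free version of the probe (same task, reworked):
```yaml
- name: zpool exists
  command: zpool list cscmirror
  register: zpool_exists
  failed_when: false
  changed_when: false
```
On the mount question in the comments: `zpool create -m <mountpoint>` mounts the pool immediately, and on most distros the zfs-import/zfs-mount services remount it at boot, so an explicit `zfs mount -a` is usually unnecessary.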

View File

@ -1,6 +1,6 @@
#!/usr/bin/python2
import time, sys, os, errno, logging, signal, copy, select, socket, grp
import time, sys, os, errno, logging, signal, copy, select, socket, grp, random
daily = 86400
twice_daily = 86400 / 2
@ -619,6 +619,11 @@ def await_command(ear):
elif command == 'status':
s.send(mirror_status())
elif command == 'dump':
s.send(str(jobs))
s.send("\n")
s.send(str(repos))
else:
logging.error('Received unrecognized command: %s' % command)
s.send('Bad command: %s' % command)
@ -629,7 +634,10 @@ def await_command(ear):
logging.error('Could not communicate with arthur over socket.')
def new_jobs(now):
for current in repos:
# To prevent repos at the 'bottom' of the dictionary from getting neglected
# when the mirror is under unusual load (and merlin is running at MAX_JOBS)
keys = repos.keys()
random.shuffle(keys)
for current in keys:
if len(jobs) >= MAX_JOBS:
break
if now <= repos[current]['last-attempt'] + mintime:
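
A portability note on the shuffle above: this script runs under Python 2, where `dict.keys()` returns a list that `random.shuffle` can reorder in place. Under Python 3, `keys()` returns a view and the same code raises a `TypeError`, so it would need:
```python
# Python 3 equivalent of the shuffle above
keys = list(repos.keys())
random.shuffle(keys)
```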