Andrew Wang 2021-09-17 16:42:15 -04:00
parent a1da443418
commit a3e11f1208
9 changed files with 122 additions and 85 deletions

View File

@@ -47,12 +47,14 @@ ansible-playbook qemu-vm.yml
 ## VM Install Option 2 (kvm)
 ### Install Packages (debian)
+**needs update**
 ```
 $ apt install qemu-kvm libvirt-daemon virt-manager virt-viewer ansible cloud-image-utils
 qemu-kvm libvirt-clients libvirt-daemon-system bridge-utils virtinst libvirt-daemon virt-manager
 ```
 ### Install Packages (archlinux)
+**needs update**
 ```
 $ pacman -S qemu libvirt virt-install virt-viewer ansible
 ```
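Both package lists are flagged **needs update**, so before running any of the playbooks it is worth confirming the hypervisor stack actually works. A minimal sanity check (assuming a systemd host and that the libvirt daemon ships as the `libvirtd` unit):
```
$ systemctl enable --now libvirtd   # start libvirt now and keep it enabled across reboots
$ virsh list --all                  # an empty table means libvirt is reachable
$ virt-host-validate qemu           # checks that KVM acceleration and cgroups are usable
```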

View File

@@ -2,21 +2,9 @@
 - name: setup mirror vm
   hosts: 127.0.0.1
   tasks:
-    - name: ubuntu iso exists
-      stat:
-        path: "{{ playbook_dir }}/vm/ubuntu20_04.iso"
-      register: iso_exists
-      ignore_errors: true
-    - name: ubuntu iso seed exists
-      stat:
-        path: "{{ playbook_dir }}/vm/seed.qcow2"
-      register: seed_exists
-      ignore_errors: true
-    - name: mirror vm exists
-      command: "virsh dumpxml mirror > /dev/null 2>&1"
-      register: vm_exists
+    - name: mirbr0 network exists
+      command: "virsh net-dumpxml mirbr0 > /dev/null 2>&1"
+      register: net_exists
       ignore_errors: true
     - name: storage pool exists
@@ -24,40 +12,37 @@
       register: pool_exists
       ignore_errors: true
-    - name: mirbr0 network exists
-      command: "virsh net-dumpxml mirbr0 > /dev/null 2>&1"
-      register: net_exists
+    - name: mirror vm exists
+      command: "virsh dumpxml mirror > /dev/null 2>&1"
+      register: vm_exists
       ignore_errors: true
     - name: create directory for vm
       file:
-        path: "{{ playbook_dir }}/vm"
+        path: "{{ playbook_dir }}/vm/disks"
         state: directory
+        recurse: yes
     - name: fetch ubuntu iso
       get_url:
         url: https://releases.ubuntu.com/20.04.3/ubuntu-20.04.3-live-server-amd64.iso
         dest: "{{ playbook_dir }}/vm/ubuntu20_04.iso"
-      when: iso_exists.stat.exists == false
-    # requires: cloud-localds (cloud-image-utils)
     # Installing VMs from Ready Images
     # https://www.x386.xyz/index.php/2021/01/06/kvm-on-ubuntu-server-1/
-    # apply network config
-    # --network-config {{ playbook_dir }}/templates/network
     - name: create ubuntu iso seed
-      command: >
-        cloud-localds
-        {{ playbook_dir }}/vm/seed.qcow2
-        {{ playbook_dir }}/templates/user-data
-      when: seed_exists.stat.exists == false
+      command:
+        cmd: >
+          cloud-localds
+          --network-config {{ playbook_dir }}/templates/network
+          {{ playbook_dir }}/vm/seed.qcow2
+          {{ playbook_dir }}/templates/user-data
+        creates: "{{ playbook_dir }}/vm/seed.qcow2"
     # get user to set net.ipv4.ip_forward = 1 ?
     - name: create mirbr0 bridge network
       command: "virsh {{ item }}"
-      with_items:
+      loop:
         - net-define {{ playbook_dir }}/templates/network.xml
         - net-autostart mirbr0
         - net-start mirbr0
@@ -65,25 +50,25 @@
     - name: create storage pool
       command: "virsh {{ item }}"
-      with_items:
-        - pool-define-as mirror dir --target="{{ playbook_dir }}/vm/"
+      loop:
+        - pool-define-as mirror dir --target="{{ playbook_dir }}/vm/disks"
         - pool-build mirror
         - pool-start mirror
         - pool-autostart mirror
       when: not pool_exists.rc == 0
-    # any way to check skip virtual disks that are already created?
-    # for now just hardcoding size as 10G since will need to mod user-data to use different values
-    # could just check for each if they are created
+    # hardcoded to 10G since modification would require change to user-data
     - name: create virtual disks
-      command: "virsh vol-create-as mirror {{ item }}"
-      with_items:
-        - mirror_root1.qcow2 10G
-        - mirror_root2.qcow2 10G
-        - mirror_disk1.qcow2 10G
-        - mirror_disk2.qcow2 10G
-        - mirror_disk3.qcow2 10G
-        - mirror_disk4.qcow2 10G
+      command:
+        cmd: "virsh vol-create-as mirror {{ item.name }} {{ item.size }}"
+        creates: "{{ playbook_dir }}/vm/disks/{{ item.name }}"
+      loop:
+        - { name: mirror_root1.qcow2, size: 10G }
+        - { name: mirror_root2.qcow2, size: 10G }
+        - { name: mirror_disk1.qcow2, size: 10G }
+        - { name: mirror_disk2.qcow2, size: 10G }
+        - { name: mirror_disk3.qcow2, size: 10G }
+        - { name: mirror_disk4.qcow2, size: 10G }
     - name: create vm
       command: >
@@ -111,13 +96,25 @@
 # copy over pub key into /root/.ssh/authorized_keys
 # add line to ssh config that allows ssh as root
-# possible that this will not be run in order?
 # roles are called relative to playbook
-- name: setup mirror services
-  hosts: 192.168.123.2
-  include_role: "../roles/{{ item }}"
-  with_items:
-    - zfs
+# - name: setup mirror services
+#   hosts: 192.168.123.2
+#   include_role: "../roles/{{ item }}"
+#   loop:
+#     - zfs (make sure runs first)
     # - index
     # - nginx
     # - rsync
     # - ftp
+# to write
+# - merlin
+# - scripts (in bin)
+# - mirrormanager
+# - users (make users + group + ssh conf + ssh pub key) (make sure runs second)
+# maybe replace ubuntu user with local user
+# users: mirror, local, push
+# ssh: allow user to login as root
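After this play finishes, the objects it defines can be inspected with plain virsh; a rough checklist (the playbook filename and the `ubuntu` login are assumptions, the address comes from the commented role section above):
```
$ ansible-playbook mirror-vm.yml   # assumed filename for this playbook
$ virsh net-list --all             # mirbr0 should be active and set to autostart
$ virsh pool-list --all            # the "mirror" dir pool should be active
$ virsh vol-list mirror            # the six 10G qcow2 volumes created above
$ virsh list --all                 # the mirror vm itself
$ ssh ubuntu@192.168.123.2         # assumed cloud-image user; see the user-data template
```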

View File

@@ -3,8 +3,21 @@
   copy:
     src: "{{role_path}}/templates/csc-mirror"
     dest: /etc/cron.d/csc-mirror
-# restart cron
-# does the mirror user exists / home dir exist
+# make sure that each role can run on its own or in any order (can assume that zfs + users role will run first)
+# remove this if useradding with home dir
+- name: mirror home
+  file:
+    path: /home/mirror
+    state: directory
+    # owner: mirror
+    # group: mirror
+    mode: 0755
+    recurse: yes
+# user template module instead
+# https://docs.ansible.com/ansible/latest/collections/ansible/builtin/template_module.html
 - name: Copy index files
   copy:
     src: "{{role_path}}/templates/mirror-index"

View File

@@ -1,14 +0,0 @@
-```
-csc-mirror ->
-/etc/cron.d/csc-mirror
-```
-make the `/home/mirror` dir
-```
-mirror-index/ ->
-/home/mirror/mirror-index/
-```
-```
-include/ ->
-/mirror/root/include/
-```

View File

@@ -1,12 +1,9 @@
 # /etc/cron.d/csc-mirror: mirror cron jobs
-MAILTO=ztseguin@csclub.uwaterloo.ca
+# MAILTO=ztseguin@csclub.uwaterloo.ca
 # m h dom mon dow user command
-# UPS Health
-* * * * * mirror /usr/bin/flock -w 0 /tmp/ups-status.lock /bin/bash -c "/usr/local/bin/ups-status >/mirror/root/ups 2>/dev/null" 2>/dev/null
 # reprepro incoming
 */10 * * * * root /srv/debian/bin/rrr-incoming cron
@@ -21,7 +18,7 @@ MAILTO=ztseguin@csclub.uwaterloo.ca
 #
 # ztseguin - temporarilly disabled until script can be updated to handle symlink farm
 # 40 5 */14 * * mirror cd /home/mirror/mirror-index && /home/mirror/mirror-index/make-index.py
-#30 3 * * 0 mirror cd /home/mirror/mirror-index && /home/mirror/mirror-index/make-index.py
+# 30 3 * * 0 mirror cd /home/mirror/mirror-index && /home/mirror/mirror-index/make-index.py
 # Update index hourly
 0 * * * * mirror cd /home/mirror/mirror-index && /home/mirror/mirror-index/make-index.py

View File

@@ -10,6 +10,7 @@
     name: nginx
     state: started
+# not great if user already has the correct configs
 - name: Remove pre-existing sites-available and sites-enabled
   file:
     state: absent

View File

@@ -17,7 +17,7 @@
     owner: root
     group: root
     mode: 0644
-  with_items:
+  loop:
     - { src: "rsyncd.conf.j2", dest: "rsyncd.conf" }
     - { src: "rsyncd-filter.conf.j2", dest: "rsyncd-filter.conf" }
   notify: restart rsync

View File

@@ -4,6 +4,29 @@
     state: latest
     update_cache: true
+# in hosts/group_vars we can do
+# vars:
+#   disks:
+#     - /dev/vdc
+#     - /dev/vdd
+#     - /dev/vde
+#     - /dev/vdf
+#   disk_arg: "{{ disks | join(' ') }}"
+# hardcoded for now
+# may be better to use the /dev/disk/by-id/... instead
+- name: disks to be used for zpool
+  set_fact:
+    disks:
+      - /dev/vdc
+      - /dev/vdd
+      - /dev/vde
+      - /dev/vdf
+- name: concatenate disks
+  set_fact:
+    disk_arg: "{{ disks | join(' ') }}"
 - name: create zfs mountpoint
   file:
     path: /mirror/root/.cscmirror
@@ -13,10 +36,20 @@
     mode: 0777
     recurse: yes
+- name: zpool exists
+  command: "zpool status cscmirror > /dev/null 2>&1"
+  register: zpool_exists
+  ignore_errors: true
 - name: create and mount zpool
-  # double check this works (and produces the correct result)
-  # may be better to use the /dev/disk/by-id/... instead
-  commmand: zpool create -f -m /mirror/root/.cscmirror cscmirror raidz2 /dev/vdc /dev/vdd /dev/vde /dev/vdf
-  # may need to mount it (also check that it will automount on boot)
-  # zfs mount -a
-  # need check for if zpool already exists
+  command: >
+    zpool create
+    -m /mirror/root/.cscmirror
+    cscmirror
+    raidz2
+    {{ disk_arg }}
+  when: not zpool_exists.rc == 0
+  # mount all zpools
+  # zfs mount -a
+  # may need to mount it (also check that it will automount on boot)
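The trailing comments about mounting and boot-time import can be checked by hand; a sketch of the manual verification (assuming the ZFS packages provide the usual systemd units; `cscmirror` and the mountpoint come from the tasks above):
```
$ zpool status cscmirror        # pool ONLINE, one raidz2 vdev over the four disks
$ zfs get mountpoint cscmirror  # should report /mirror/root/.cscmirror
$ zfs mount -a                  # mount any datasets that did not mount automatically
$ systemctl enable zfs-import-cache.service zfs-mount.service   # typical automount-on-boot units
```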

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/python2
-import time, sys, os, errno, logging, signal, copy, select, socket, grp
+import time, sys, os, errno, logging, signal, copy, select, socket, grp, random
 daily = 86400
 twice_daily = 86400 / 2
@@ -619,6 +619,11 @@ def await_command(ear):
         elif command == 'status':
             s.send(mirror_status())
+        elif command == 'dump':
+            s.send(str(jobs))
+            s.send("\n")
+            s.send(str(repos))
         else:
             logging.error('Received unrecognized command: %s' % command)
             s.send('Bad command: %s' % command)
@@ -629,7 +634,10 @@ def await_command(ear):
         logging.error('Could not communicate with arthur over socket.')
 def new_jobs(now):
-    for current in repos:
+    # To prevent repos at the 'bottom' of the dictionary from getting neglected when mirror is under unusual load (and merlin is running at MAX_JOBS)
+    keys = repos.keys()
+    random.shuffle(keys)
+    for current in keys:
         if len(jobs) >= MAX_JOBS:
             break
         if now <= repos[current]['last-attempt'] + mintime:
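The new `dump` command is only reachable over the socket that arthur already uses to talk to merlin; purely as an illustration (the socket path here is hypothetical, the real client is arthur):
```
$ echo dump | nc -U /run/merlin.sock   # hypothetical socket path; prints str(jobs), a newline, then str(repos)
```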