fixes and customization

Andrew Wang 2021-10-05 23:20:58 -04:00
parent 2834d86e92
commit 1fce04612d
11 changed files with 111 additions and 97 deletions

View File

@ -1,7 +0,0 @@
---
# set this depending on your system
ovmf: /usr/share/edk2-ovmf/x64/OVMF_CODE.fd
vm_ram: 3G
vm_disk_size: 10G
vm_ssh_port: 7777 # vm doesn't seem to start with 22, leaving this here for now
userdata: "{{playbook_dir}}/userdata"

21 hosts
View File

@ -1,21 +0,0 @@
[local]
127.0.0.1
[local:vars]
ansible_connection=local
[vm]
ubuntu@localhost
[vm:vars]
host=mirror
ansible_connection=ssh
ansible_port=7777
ansible_user=local
ansible_password=password
ansible_become_user=root
ansible_become_password=password
# 192.168.123.2 host=mirror ansible_connection=ssh ansible_port=22 ansible_user=ubuntu ansible_password=ubuntu ansible_become_user=root ansible_become_password=ubuntu
# ansible_ssh_private_key_file

View File

@ -15,9 +15,13 @@ virtinst
virt-manager
bridge-utils
```
Then install virt-viewer to interact run through the install with
Then install virt-viewer for the Ubuntu install using
```
virt-viewer
$ apt install virt-viewer
```
Finally, add your user to the `libvirt` group (you may need to log out for this to take effect)
```
usermod -a -G libvirt <username>
```
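Since everything else in this repo is driven by Ansible, the same host prep can be written as a short play. A minimal sketch, assuming a Debian/Ubuntu host; the play and package list below are illustrative and not part of this repo:
```
# hypothetical host-prep play mirroring the manual steps above
- hosts: local
  become: yes
  tasks:
    - name: install the libvirt/qemu tooling
      apt:
        name: [virtinst, virt-manager, bridge-utils, virt-viewer]
        state: present
    - name: add the invoking user to the libvirt group
      user:
        name: "{{ lookup('env', 'USER') }}"
        groups: libvirt
        append: yes
```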
### Prerequisites (archlinux)
**needs update**
@ -26,16 +30,17 @@ $ pacman -S qemu libvirt virt-install virt-viewer ansible
```
## Running the Playbook
Modify the `hosts` file to your liking then run
```
ansible-playbook -K main.yml
```
Then connect to the created vm using virt-viewer
```
virt-viewer --domain-name mirror
virt-viewer --connect qemu:///system mirror
```
If the vm is on a remote machine
```
virt-viewer --connect qemu+ssh://user@X.X.X.X/system mirror
virt-viewer --connect qemu+ssh://<user>@X.X.X.X/system mirror
```
---
@ -44,7 +49,7 @@ Follow the steps under [Installing Ubuntu](#installing-ubuntu) before continuing
---
container may stop so will need to start with
the vm may stop, so you will need to start it with
```
virsh start mirror
```
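If restarting it by hand gets tedious, libvirt can also be told to start the domain whenever libvirtd comes up. A one-task sketch in the same style as these playbooks; autostart is optional and not something the playbook currently configures:
```
- name: autostart the mirror vm whenever libvirtd starts
  command: virsh -c qemu:///system autostart mirror
```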
@ -60,12 +65,13 @@ ssh local@192.168.123.XXX
In the vm switch to root, install some packages, and clone the repo.
```
$ sudo su
password: password
$ cd
$ apt update && apt upgrade
$ apt install git ansible
$ git clone https://git.csclub.uwaterloo.ca/public/mirror-env.git
```
Then enter `mirror-env/mirror` and follow the instructions there.
Then `cd mirror-env/mirror` and follow the `README.md` there.
## Deleting the VM
@ -76,6 +82,10 @@ ansible-playbook -K cleanup.yml
## Troubleshooting
```
virsh -c qemu:///system start mirror
virsh -c qemu:///system net-dhcp-leases mirbr0
```
```
net.ipv4.ip_forward = 1
```
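If forwarding turns out to be the problem, the setting above can be applied (and persisted) with Ansible's stock sysctl module. The task is a troubleshooting sketch, not part of these playbooks:
```
- name: enable IPv4 forwarding so traffic can be routed for the mirbr0 bridge
  become: yes
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    sysctl_set: yes
    state: present
    reload: yes
```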
@ -94,7 +104,7 @@ $ service restart libvirtd
- user: local
- password: password
Note: the resulting system created from following these steps should look like this:
The system created from following these steps should look like this:
```
$ lsblk

View File

@ -5,39 +5,39 @@
become: yes
tasks:
- name: mirbr0 network exists
command: "virsh net-dumpxml mirbr0"
command: "virsh -c qemu:///system net-dumpxml mirbr0"
changed_when: false
ignore_errors: true
register: net_exists
- name: storage pool exists
command: "virsh pool-dumpxml mirror"
command: "virsh -c qemu:///system pool-dumpxml mirror"
changed_when: false
ignore_errors: true
register: pool_exists
- name: mirror vm exists
command: "virsh dumpxml mirror"
command: "virsh -c qemu:///system dumpxml mirror"
changed_when: false
ignore_errors: true
register: vm_exists
- name: delete vm
command: "virsh {{ item }}"
command: "virsh -c qemu:///system {{ item }}"
loop:
- destroy mirror
- undefine mirror
- undefine --nvram mirror
when: vm_exists.rc == 0
- name: delete storage pool
command: "virsh {{ item }}"
command: "virsh -c qemu:///system {{ item }}"
loop:
- pool-destroy mirror
- pool-undefine mirror
when: pool_exists.rc == 0
- name: delete mirbr0 bridge network
command: "virsh {{ item }}"
command: "virsh -c qemu:///system {{ item }}"
loop:
- net-destroy mirbr0
- net-undefine mirbr0

20 libvirt/hosts Normal file
View File

@ -0,0 +1,20 @@
[local]
127.0.0.1
[local:vars]
ansible_connection=local
# vm allocated ram in MB
vm_ram=2048
# vcpus created
vm_vcpus=1
# port for vnc connection
vnc_port=5911
# size of each disk in the zpool
zfs_disks_size=1G
# size of each disk in the RAID1 pool
root_disks_size=10G

View File

@ -5,19 +5,19 @@
become: yes
tasks:
- name: mirbr0 network exists
command: "virsh net-dumpxml mirbr0"
command: "virsh -c qemu:///system net-dumpxml mirbr0"
changed_when: false
ignore_errors: true
register: net_exists
- name: storage pool exists
command: "virsh pool-dumpxml mirror"
command: "virsh -c qemu:///system pool-dumpxml mirror"
changed_when: false
ignore_errors: true
register: pool_exists
- name: mirror vm exists
command: "virsh dumpxml mirror"
command: "virsh -c qemu:///system dumpxml mirror"
changed_when: false
ignore_errors: true
register: vm_exists
@ -33,31 +33,8 @@
url: "https://releases.ubuntu.com/20.04.3/ubuntu-20.04.3-live-server-amd64.iso"
dest: "{{ playbook_dir }}/vm/ubuntu20_04.iso"
# autoinstall not working for libvirt
# https://manintheit.org/en/posts/automation/ubuntu-autoinstall/
# https://ubuntu.com/server/docs/install/autoinstall-quickstart
#
# --network-config {{ playbook_dir }}/templates/network
# - name: create ubuntu iso seed
# command:
# cmd: >
# cloud-localds
# {{ playbook_dir }}/vm/seed.iso
# {{ playbook_dir }}/templates/user-data
# {{ playbook_dir }}/templates/meta-data
# creates: "{{ playbook_dir }}/vm/seed.iso"
# - name: create ubuntu iso seed
# command:
# cmd: >
# genisoimage
# -output {{ playbook_dir }}/vm/cidata.iso
# -V cidata -r -J
# {{ playbook_dir }}/templates/user-data
# {{ playbook_dir }}/templates/meta-data
# creates: "{{ playbook_dir }}/vm/seed.iso"
- name: create mirbr0 bridge network
command: "virsh {{ item }}"
command: "virsh -c qemu:///system {{ item }}"
loop:
- net-define {{ playbook_dir }}/templates/network.xml
- net-autostart mirbr0
@ -65,7 +42,7 @@
when: net_exists.rc != 0
- name: create storage pool
command: "virsh {{ item }}"
command: "virsh -c qemu:///system {{ item }}"
loop:
- pool-define-as mirror dir --target="{{ playbook_dir }}/vm"
- pool-build mirror
@ -73,32 +50,27 @@
- pool-start mirror
when: pool_exists.rc != 0
# TODO: allow user to create any number of disks with any size
- name: create virtual disks
command:
cmd: "virsh vol-create-as mirror {{ item.name }} {{ item.size }}"
cmd: "virsh -c qemu:///system vol-create-as mirror {{ item.name }} {{ item.size }}"
creates: "{{ playbook_dir }}/vm/{{ item.name }}"
loop:
- { name: mirror_root1.qcow2, size: 10G }
- { name: mirror_root2.qcow2, size: 10G }
- { name: mirror_disk1.qcow2, size: 10G }
- { name: mirror_disk2.qcow2, size: 10G }
- { name: mirror_disk3.qcow2, size: 10G }
- { name: mirror_disk4.qcow2, size: 10G }
- { name: mirror_root1.qcow2, size: "{{ root_disks_size }}" }
- { name: mirror_root2.qcow2, size: "{{ root_disks_size }}" }
- { name: mirror_disk1.qcow2, size: "{{ zfs_disks_size }}" }
- { name: mirror_disk2.qcow2, size: "{{ zfs_disks_size }}" }
- { name: mirror_disk3.qcow2, size: "{{ zfs_disks_size }}" }
- { name: mirror_disk4.qcow2, size: "{{ zfs_disks_size }}" }
# does not exist yet
# --os-variant ubuntu20.04
# --cdrom path={{ playbook_dir }}/vm/ubuntu20_04.iso
# --disk path={{ playbook_dir }}/vm/seed.iso,format=raw,bus=virtio
# --cloud-init user-data={{ playbook_dir }}/templates/user-data,meta-data={{ playbook_dir }}/templates/meta-data
# --disk path={{ playbook_dir }}/vm/focal-server-cloudimg-amd64.img
- name: create vm
# flag does not work:
# --os-variant ubuntu20.04
command: >
virt-install
--connect=qemu:///system
--name=mirror
--memory=2048
--vcpus=1
--memory={{ vm_ram }}
--vcpus={{ vm_vcpus }}
--boot uefi
--os-type linux
--cdrom {{ playbook_dir }}/vm/ubuntu20_04.iso
@ -109,6 +81,6 @@
--disk vol=mirror/mirror_disk3.qcow2,bus=virtio
--disk vol=mirror/mirror_disk4.qcow2,bus=virtio
--network bridge=mirbr0,model=virtio
--graphics vnc,port=5911,listen=127.0.0.1
--graphics vnc,port={{ vnc_port }},listen=127.0.0.1
--noautoconsole
when: vm_exists.rc != 0

View File

@ -1,7 +1,7 @@
# Mirror Playbook
**DO NOT run this on your host machine!**
This playbook as root **inside** the vm to set up the mirror's services
Run this playbook as root **inside** the vm to set up the mirror's services
```
ansible-playbook main.yml
```

5 mirror/hosts Normal file
View File

@ -0,0 +1,5 @@
[local]
127.0.0.1
[local:vars]
ansible_connection=local

View File

@ -6,13 +6,13 @@ Install the following:
- genisoimage
- ovmf (find the location of `OVMF_CODE.fd`, it is system dependent)
Before doing anything else, edit the config files in `group_vars/` to your
system's needs. For qemu installation specifically, you need to provide the
location of your `OVMF_CODE.fd` file.
Before doing anything else, edit the vars in `hosts` to your system's needs.
For qemu installation specifically, you need to provide the location of your
`OVMF_CODE.fd` file.
To begin the setup process, in this repo's root, run:
To begin the setup process, in this folder, run:
```
$ ansible-playbook -K qemu/install.yml
$ ansible-playbook -K install.yml
```
To view the vm, you also need a vnc viewer.
@ -32,18 +32,18 @@ hang. You can manually terminate ansible once the vm shows:
Once the base installation is complete, we need to configure the system in a
postinstall step; start by running the vm with:
```
$ ansible-playbook qemu/run.yml
$ ansible-playbook run.yml
```
And then run the postinstall playbook:
```
$ ansible-playbook qemu/postinstall.yml
$ ansible-playbook postinstall.yml
```
The mirror dev environment is now ready to use. All future vm startups can be
done with:
```
$ ansible-playbook qemu/run.yml
$ ansible-playbook run.yml
```
The default login user has

35 qemu/hosts Normal file
View File

@ -0,0 +1,35 @@
[vm]
ubuntu@localhost
[vm:vars]
host=mirror
ansible_connection=ssh
ansible_port=7777
ansible_user=local
ansible_password=password
ansible_become_user=root
ansible_become_password=password
[local]
127.0.0.1
[local:vars]
ansible_connection=local
# path to OVMF_CODE.fd
ovmf=/usr/share/edk2-ovmf/x64/OVMF_CODE.fd
# ram allocated for the vm
vm_ram=3G
# vm_vcpus=1
# TODO: different sizes for root and zpool disks
vm_disk_size=10G
# directory to store disks and iso
userdata="{{playbook_dir}}/userdata"
# vm doesn't seem to start with 22
# if changed modify ansible_port to match
vm_ssh_port=7777
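Plays that target the `[vm]` group can only connect once sshd in the guest answers on this forwarded port, so a readiness check is the natural companion to `vm_ssh_port` and `ansible_port`. A hypothetical task sketch; the actual run.yml is not part of this diff, so treat this as an assumption about how the wait could be done:
```
- name: wait for the vm's forwarded ssh port before running the vm plays
  wait_for:
    host: 127.0.0.1
    port: "{{ vm_ssh_port }}"
    delay: 5
    timeout: 600
```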