From 60664d3f1dfb57d851bbfb29e41629db0f0eddc0 Mon Sep 17 00:00:00 2001 From: Max Erenberg Date: Sun, 30 May 2021 16:58:59 -0400 Subject: [PATCH 1/3] add instructions for standalone bridge --- .gitignore | 2 + README.md | 81 +++++++++++++++++++++--- hosts | 20 ------ hosts.sample | 37 +++++++++++ mail/README.md | 38 +++++++++-- mail/mailman3/mailman3.yml | 19 +++++- outsider/README.md | 42 ++++++------ roles/systemd_workarounds/tasks/main.yml | 2 + 8 files changed, 183 insertions(+), 58 deletions(-) delete mode 100644 hosts create mode 100644 hosts.sample diff --git a/.gitignore b/.gitignore index e75d6b1..53d0b5f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ # Vim .*.swp + +/hosts diff --git a/README.md b/README.md index 8564e48..e39f04c 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,15 @@ run on the CSC servers. The idea is to encourage experimentation without breaking the real services and causing outages. ## For Windows Users +**Update**: WSL2 doesn't seem to be working too well with LXC. I suggest +using VirtualBox or VMWare instead. + Setup WSL2 and open a terminal to it. See the [official setup instructions](https://docs.microsoft.com/en-ca/windows/wsl/install-win10#manual-installation-steps). Make sure you use Ubuntu/Ubuntu Latest from the Windows Store. Once setup is complete, run the following command to update everything: ``` sudo apt update && sudo apt full-upgrade -y --auto-remove --fix-broken --fix-missing --fix-policy --show-progress && sudo apt autoclean ``` -You can skip the Prerequisites section. ## Prerequisites This repo consists of several Ansible playbooks which will automate tasks @@ -22,6 +24,75 @@ theoretically also work. The VM should be running some reasonably recent version of Debian or Ubuntu. 2 CPU cores and 2 GB of RAM should be sufficient. +**Update**: I previously recommended using a shared bridge interface +in the VM. 
This appears to be causing issues for VMware users,
+so I am now recommending a standalone bridge instead with NAT masquerading.
+The instructions for the shared bridge should still work, but if you are
+creating the dev environment from scratch, I suggest using the
+standalone bridge instead.
+
+Note that if you do use the standalone bridge, the containers will not
+be accessible from outside the VM, so if you need to access one of the
+containers from your physical host, you will need to set up TCP forwarding
+via `socat` or something similar.
+
+No matter which network setup you decide to use, you will need to manually
+create a `hosts` file before running any of the playbooks. Copy the
+`hosts.sample` file as a starting point and edit it as needed:
+```
+cp hosts.sample hosts
+```
+
+Make sure you have the `bridge-utils` package installed in the VM.
+This should be installed by default on Ubuntu, but you may have to manually
+install it on Debian.
+
+Also, make sure you disable the default LXC bridge, as it will interfere
+with our own bridge:
+```
+systemctl stop lxc-net
+systemctl mask lxc-net
+```
+
+### Standalone bridge
+Your /etc/network/interfaces (in the VM) should look like the following:
+```
+auto enp1s0
+iface enp1s0 inet dhcp
+
+auto lxcbr1
+iface lxcbr1 inet static
+    bridge_ports none
+    bridge_fd 0
+    bridge_maxwait 0
+    address 192.168.100.1/24
+    up iptables -t nat -C POSTROUTING -s 192.168.100.0/24 ! -o lxcbr1 -j MASQUERADE 2>/dev/null || \
+       iptables -t nat -A POSTROUTING -s 192.168.100.0/24 ! -o lxcbr1 -j MASQUERADE
+    down iptables -t nat -D POSTROUTING -s 192.168.100.0/24 ! -o lxcbr1 -j MASQUERADE 2>/dev/null || true
+```
+Replace `enp1s0` by the default interface on the VM. Replace `192.168.100.1/24`
+and `192.168.100.0/24` by whatever IP address and subnet you want to use for the
+bridge.
+
+Now bring up the bridge:
+```
+ifup lxcbr1
+```
+
+Make sure you update the `hosts` file to match whichever IP address and subnet
+you chose.
+ +Now open `/etc/lxc/default.conf` and make sure it looks like the following: +``` +lxc.net.0.type = veth +lxc.net.0.link = lxcbr1 +lxc.net.0.flags = up +lxc.net.0.hwaddr = 00:16:3e:xx:xx:xx +``` +(The hwaddr can be different.) This will ensure that new containers +have this configuration by default. + +### Shared bridge The VM should be attached to a bridge interface with NAT forwarding. QEMU should create a default interface like this called 'virbr0'. For this tutorial, I am assuming that the interface subnet is @@ -32,13 +103,7 @@ do this via virsh or virt-manager; do not modify the subnet manually using iproute2. The reason for this is because libvirt needs to know what the subnet is to setup dnsmasq and iptables properly. -Once the VM is up and running, you will need to create a shared bridge -interface. First, disable the default bridge: -``` -systemctl stop lxc-net -systemctl mask lxc-net -``` -Then paste the following into /etc/network/interfaces: +Your /etc/network/interfaces should look like the following: ``` iface enp1s0 inet manual diff --git a/hosts b/hosts deleted file mode 100644 index 518a36b..0000000 --- a/hosts +++ /dev/null @@ -1,20 +0,0 @@ -[containers] -dns ansible_lxc_host=dns -mail ansible_lxc_host=mail -coffee ansible_lxc_host=coffee -auth1 ansible_lxc_host=auth1 -outsider ansible_lxc_host=outsider - -[containers:vars] -ansible_connection = lxc -ansible_python_interpreter = python3 -base_domain = csclub.internal -ipv4_subnet = 192.168.122.0/24 -ipv4_gateway = 192.168.122.1 -upstream_dns = 192.168.122.1 -host_ipv4_addr = 192.168.122.226 -outsider_ipv4_addr = 192.168.125.2 -dns_ipv4_addr = 192.168.122.4 -mail_ipv4_addr = 192.168.122.52 -coffee_ipv4_addr = 192.168.122.20 -auth1_ipv4_addr = 192.168.122.117 diff --git a/hosts.sample b/hosts.sample new file mode 100644 index 0000000..642e6a0 --- /dev/null +++ b/hosts.sample @@ -0,0 +1,37 @@ +[containers] +dns ansible_lxc_host=dns +mail ansible_lxc_host=mail +coffee ansible_lxc_host=coffee +auth1 
ansible_lxc_host=auth1
+outsider ansible_lxc_host=outsider
+
+[containers:vars]
+ansible_connection = lxc
+ansible_python_interpreter = python3
+base_domain = csclub.internal
+
+# the subnet for the containers
+ipv4_subnet = 192.168.100.0/24
+
+# the gateway for the containers - this should be the upstream
+# gateway if you are using a shared bridge, or the VM's bridge
+# IP address if you are using a standalone bridge.
+ipv4_gateway = 192.168.100.1
+
+# the upstream DNS IP address
+upstream_dns = 192.168.122.1
+
+# the IP address of the VM - this should be the VM's default outgoing
+# IP address if you are using a shared bridge, or the VM's bridge
+# address if you are using a standalone bridge.
+host_ipv4_addr = 192.168.100.1
+
+# The IP addresses for the containers. The outsider IP address does not really
+# matter, just make sure it is in a different subnet from the others.
+# Make sure to update the IP addresses of the other containers to be in the
+# ipv4_subnet which you specified above.
+outsider_ipv4_addr = 192.168.101.2
+dns_ipv4_addr = 192.168.100.4
+mail_ipv4_addr = 192.168.100.52
+coffee_ipv4_addr = 192.168.100.20
+auth1_ipv4_addr = 192.168.100.117
diff --git a/mail/README.md b/mail/README.md
index f574189..83e5a20 100644
--- a/mail/README.md
+++ b/mail/README.md
@@ -30,16 +30,40 @@ Attach to the mail container and create a new list, e.g. syscom:
 cd /var/lib/mailman
 bin/newlist -a syscom root@csclub.internal mailman
 ```
-Now on your **real** computer (i.e. not the VM), you are going to visit
+
+### Standalone bridge
+If you are using a standalone bridge, unfortunately you will not be
+able to access the container directly from your physical host because
+it is behind a NAT.
+I suggest running socat on the VM for TCP forwarding:
+```
+apt install socat
+socat TCP-LISTEN:80,fork TCP:192.168.100.52:80
+```
+This will forward requests to port 80 on the VM to port 80 in the
+mail container.
+ +Now open `/etc/hosts` on your computer and add the following entry: +``` +192.168.122.225 mailman.csclub.internal +``` +Replace `192.168.122.225` with the default IP of the VM. + +### Shared bridge +If you are using a shared bridge, you can access the container +directly from your physical host. Add the following entry to your +`/etc/hosts`: +``` +192.168.100.52 mailman.csclub.internal +``` +Replace `192.168.100.52` with the IP of the mail container. + +## Mailman web interface +Now on your physical host, you are going to visit the web interface for Mailman to adjust some settings and subscribe some new users. -First, open `/etc/hosts` on your computer and add the following entry: -``` -192.168.122.52 mailman.csclub.internal -``` - -Now visit http://mailman.csclub.internal/admin/syscom in your browser. +Visit http://mailman.csclub.internal/admin/syscom in your browser. The admin password is 'mailman' (no quotes). I suggest going over each setting in the Privacy section and reading it diff --git a/mail/mailman3/mailman3.yml b/mail/mailman3/mailman3.yml index 6dabe68..a5c2c4d 100644 --- a/mail/mailman3/mailman3.yml +++ b/mail/mailman3/mailman3.yml @@ -31,7 +31,7 @@ import_role: name: ../../roles/systemd_workarounds vars: - services: [ "memcached" ] + services: [ "memcached", "logrotate" ] - name: upgrade pip pip: executable: pip3 @@ -55,6 +55,7 @@ pip: virtualenv: /opt/mailman3 virtualenv_python: python3 + virtualenv_site_packages: yes name: "{{ item }}" loop: - mysqlclient @@ -63,6 +64,17 @@ - mailman - mailman-web - mailman-hyperkitty + - name: find the site packages directory in the virtualenv + find: + paths: /opt/mailman3/lib + patterns: "python3*" + file_type: directory + register: find_ret + - name: make sure that global site packages are inherited + file: + name: "{{ item.path }}/no-global-site-packages.txt" + state: absent + loop: "{{ find_ret.files }}" - name: create mailman3 folder file: path: /etc/mailman3 @@ -107,6 +119,11 @@ - reload systemd - 
restart service - meta: flush_handlers + - name: stop Mailman 2 + systemd: + name: mailman + state: stopped + masked: yes - name: enable and start new services systemd: name: "{{ item }}" diff --git a/outsider/README.md b/outsider/README.md index c621b76..49c0591 100644 --- a/outsider/README.md +++ b/outsider/README.md @@ -3,32 +3,30 @@ So this container's a bit special - it represents a host which is **not** on the UW network. The motivation is to test software which have different privilege settings for people outside of the local network, e.g. Postfix. -The idea is to route packets from the 'outsider' container to the LXC host -(i.e. the VM), and the VM will then route them to the other containers. -We could've also created an extra container to act as the router, but -that seemed kind of wasteful. +The easiest way to do this, in my opinion, is to simply create a new bridge +with a different subnet. Add the following to your /etc/network/interfaces: +``` +auto lxcbr2 +iface lxcbr2 inet static + bridge_ports none + bridge_fd 0 + bridge_maxwait 0 + address 192.168.101.1/24 + up iptables -t nat -C POSTROUTING -s 192.168.101.0/24 ! -o lxcbr2 -j MASQUERADE 2>/dev/null || \ + iptables -t nat -A POSTROUTING -s 192.168.101.0/24 ! -o lxcbr2 -j MASQUERADE + down iptables -t nat -D POSTROUTING -s 192.168.101.0/24 ! -o lxcbr2 -j MASQUERADE 2>/dev/null || true +``` +Then: +``` +ifup lxcbr1 +``` ## Installation -Once you have created the container, add the following iptables rules on -the VM: +Once you have created the container, edit the following line in +`/var/lib/lxc/outsider/config`: ``` -iptables -t nat -A POSTROUTING -s 192.168.125.0/24 -d 192.168.122.1 -j MASQUERADE -iptables -t nat -A POSTROUTING -s 192.168.125.0/24 ! 
-d 192.168.122.0/24 -j MASQUERADE +lxc.net.0.link = lxcbr2 ``` -I also strongly suggest installing iptables-persistent so that these rules -persist on the next reboot: -``` -apt install iptables-persistent -``` -The idea here is that packets from the 'outsider' container should only be -**forwarded**, not masqueraded, to the other containers (to preserve its IP -address), unless if it needs to communicate with the outside world (e.g. to -download Debian packages), in which case we need to use NAT because the -iptables rules which libvirt created on your real computer don't take that -subnet into account (run `iptables -t nat -L -v` on your real computer -to see what I mean). 192.168.122.1, which is your real computer, is a special -case because your host does not have a routing table entry for that -subnet, so it wouldn't be able to reply. As usual, create the container, start it, and install python3. Now detach and run the playbook: diff --git a/roles/systemd_workarounds/tasks/main.yml b/roles/systemd_workarounds/tasks/main.yml index f173234..59c88d9 100644 --- a/roles/systemd_workarounds/tasks/main.yml +++ b/roles/systemd_workarounds/tasks/main.yml @@ -12,6 +12,8 @@ PrivateTmp=false PrivateDevices=false ProtectHome=false + ProtectControlGroups=false + ProtectKernelModules=false dest: "/etc/systemd/system/{{ item }}.service.d/override.conf" loop: "{{ services }}" register: service_overrides From 3e6247e181d60cc8838f772fb70ae671113d457c Mon Sep 17 00:00:00 2001 From: Max Erenberg Date: Wed, 2 Jun 2021 04:30:08 -0400 Subject: [PATCH 2/3] add suggestions from a268wang --- mail/README.md | 15 +++++++++++++++ outsider/README.md | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/mail/README.md b/mail/README.md index 83e5a20..28bfcc3 100644 --- a/mail/README.md +++ b/mail/README.md @@ -43,6 +43,21 @@ socat TCP-LISTEN:80,fork TCP:192.168.100.52:80 This will forward requests to port 80 on the VM to port 80 in the mail container. 
+Alternatively, you can use iptables: +``` +iptables -t nat -A PREROUTING -s 192.168.122.0/24 -p tcp --dport 80 -j DNAT --to-destination 192.168.100.52 +``` +Replace '192.168.122.0/24' by the subnet of your VM (your physical host +should also be on this subnet), and replace '192.168.100.52' by the IP +of the mail container. +To make sure this iptables rule is applied automatically at startup, +you can install the iptables-persistent package: +``` +apt install iptables-persistent +``` +You can use `dpkg-reconfigure iptables-persistent` if you ever need to +change the iptables rules which are applied at startup. + Now open `/etc/hosts` on your computer and add the following entry: ``` 192.168.122.225 mailman.csclub.internal diff --git a/outsider/README.md b/outsider/README.md index 49c0591..70e0795 100644 --- a/outsider/README.md +++ b/outsider/README.md @@ -18,7 +18,7 @@ iface lxcbr2 inet static ``` Then: ``` -ifup lxcbr1 +ifup lxcbr2 ``` ## Installation From 3f977516cdcfb1c6ee618a6d374a90ad8d76a8f2 Mon Sep 17 00:00:00 2001 From: Max Erenberg Date: Sun, 6 Jun 2021 22:38:01 -0400 Subject: [PATCH 3/3] add tasks for Kerberos --- auth1/kerberos/kadm5.acl | 6 + auth1/kerberos/kdc.conf.j2 | 19 +++ auth1/kerberos/krb5.conf.j2 | 46 ++++++++ auth1/ldap/data.ldif.j2 | 36 ++++-- auth1/ldap/slapd.conf.j2 | 18 +-- auth1/main.yml | 143 +++++++++++++++++++++-- dns/main.yml | 1 + dns/templates/dnsmasq.conf.j2 | 3 + roles/systemd_workarounds/tasks/main.yml | 3 + 9 files changed, 247 insertions(+), 28 deletions(-) create mode 100644 auth1/kerberos/kadm5.acl create mode 100644 auth1/kerberos/kdc.conf.j2 create mode 100644 auth1/kerberos/krb5.conf.j2 diff --git a/auth1/kerberos/kadm5.acl b/auth1/kerberos/kadm5.acl new file mode 100644 index 0000000..76df603 --- /dev/null +++ b/auth1/kerberos/kadm5.acl @@ -0,0 +1,6 @@ +# This file Is the access control list for krb5 administration. 
+# When this file is edited run service krb5-admin-server restart to activate +# One common way to set up Kerberos administration is to allow any principal +# ending in /admin is given full administrative rights. +# To enable this, uncomment the following line: +*/admin * diff --git a/auth1/kerberos/kdc.conf.j2 b/auth1/kerberos/kdc.conf.j2 new file mode 100644 index 0000000..0c4a444 --- /dev/null +++ b/auth1/kerberos/kdc.conf.j2 @@ -0,0 +1,19 @@ +[kdcdefaults] + kdc_ports = 88 + +[realms] + CSCLUB.INTERNAL = { + database_name = /var/lib/krb5kdc/principal + admin_keytab = FILE:/etc/krb5kdc/kadm5.keytab + acl_file = /etc/krb5kdc/kadm5.acl + key_stash_file = /etc/krb5kdc/stash + kdc_ports = 88 + max_life = 10h 0m 0s + max_renewable_life = 7d 0h 0m 0s + master_key_type = des3-hmac-sha1 + supported_enctypes = aes256-cts:normal arcfour-hmac:normal des3-hmac-sha1:normal des3-cbc-sha1:normal des-cbc-crc:normal des:normal des:v4 des:norealm des:onlyrealm des:afs3 + default_principal_flags = +preauth + iprop_enable = true + iprop_slave_poll = 2m + iprop_port = 750 + } diff --git a/auth1/kerberos/krb5.conf.j2 b/auth1/kerberos/krb5.conf.j2 new file mode 100644 index 0000000..4a07911 --- /dev/null +++ b/auth1/kerberos/krb5.conf.j2 @@ -0,0 +1,46 @@ +[libdefaults] + default_realm = {{ krb_realm }} + +# The following krb5.conf variables are only for MIT Kerberos. + kdc_timesync = 1 + ccache_type = 4 + forwardable = true + proxiable = true + + dns_lookup_kdc = false + dns_lookup_realm = false + + # For NFS, apparently + allow_weak_crypto = true + +# The following encryption type specification will be used by MIT Kerberos +# if uncommented. In general, the defaults in the MIT Kerberos code are +# correct and overriding these specifications only serves to disable new +# encryption types as they are added, creating interoperability problems. 
+# +# The only time when you might need to uncomment these lines and change +# the enctypes is if you have local software that will break on ticket +# caches containing ticket encryption types it doesn't know about (such as +# old versions of Sun Java). + +# default_tgs_enctypes = des3-hmac-sha1 +# default_tkt_enctypes = des3-hmac-sha1 +# permitted_enctypes = des3-hmac-sha1 + +# The following libdefaults parameters are only for Heimdal Kerberos. + fcc-mit-ticketflags = true + +[realms] + {{ krb_realm }} = { + kdc = kdc1.{{ base_domain }} + admin_server = kadmin.{{ base_domain }} + } + +[domain_realm] + .csclub.internal = {{ krb_realm }} + csclub.internal = {{ krb_realm }} + +[logging] + kdc = SYSLOG:INFO:AUTH + admin_server = SYSLOG:INFO:AUTH + default = SYSLOG:INFO:AUTH diff --git a/auth1/ldap/data.ldif.j2 b/auth1/ldap/data.ldif.j2 index f4ce33c..34be863 100644 --- a/auth1/ldap/data.ldif.j2 +++ b/auth1/ldap/data.ldif.j2 @@ -45,17 +45,14 @@ sudoHost: ALL sudoCommand: ALL sudoRunAsUser: ALL -# The password for each user is slapd. -# The hashes were generated with slappasswd. 
- dn: uid=ctdalek,ou=People,{{ ldap_base }} cn: Calum Dalek -userPassword: {SSHA}oaQvmex/jH2MeBsmxZ7YVyaKcC7zYwDK +userPassword: {SASL}ctdalek@{{ krb_realm }} loginShell: /bin/bash -homeDirectory: /home/ctdalek -uidNumber: 10101 +homeDirectory: /users/ctdalek uid: ctdalek -gidNumber: 10101 +uidNumber: 20001 +gidNumber: 20001 objectClass: top objectClass: account objectClass: posixAccount @@ -69,4 +66,27 @@ objectClass: top objectClass: group objectClass: posixGroup cn: ctdalek -gidNumber: 10101 +gidNumber: 20001 + +dn: uid=regular1,ou=People,{{ ldap_base }} +cn: Regular One +userPassword: {SASL}regular1@{{ krb_realm }} +loginShell: /bin/bash +homeDirectory: /users/regular1 +uid: regular1 +uidNumber: 20002 +gidNumber: 20002 +objectClass: top +objectClass: account +objectClass: posixAccount +objectClass: shadowAccount +objectClass: member +program: MAT/Mathematics Computer Science +term: s2021 + +dn: cn=regular1,ou=Group,{{ ldap_base }} +objectClass: top +objectClass: group +objectClass: posixGroup +cn: regular1 +gidNumber: 20002 diff --git a/auth1/ldap/slapd.conf.j2 b/auth1/ldap/slapd.conf.j2 index 360f354..52cc2c1 100644 --- a/auth1/ldap/slapd.conf.j2 +++ b/auth1/ldap/slapd.conf.j2 @@ -45,15 +45,15 @@ timelimit unlimited localssf 128 # map kerberos users to ldap users -# sasl-realm CSCLUB.UWATERLOO.CA -# authz-regexp "uid=([^/=]*),cn=CSCLUB.UWATERLOO.CA,cn=GSSAPI,cn=auth" -# "uid=$1,ou=people,{{ ldap_base }}" -# authz-regexp "uid=ceod/admin,cn=CSCLUB.UWATERLOO.CA,cn=GSSAPI,cn=auth" -# "cn=ceod,{{ ldap_base }}" -# authz-regexp "uid=ldap/auth2.csclub.uwaterloo.ca,cn=CSCLUB.UWATERLOO.CA,cn=GSSAPI,cn=auth" -# "cn=ldap-slave,{{ ldap_base }}" -# authz-regexp "uid=renewal/([^/=]*).csclub.uwaterloo.ca,cn=CSCLUB.UWATERLOO.CA,cn=GSSAPI,cn=auth" -# "cn=renewal,{{ ldap_base }}" +sasl-realm CSCLUB.INTERNAL +authz-regexp "uid=([^/=]*),cn=CSCLUB.INTERNAL,cn=GSSAPI,cn=auth" + "uid=$1,ou=people,dc=csclub,dc=internal" +authz-regexp 
"uid=ceod/admin,cn=CSCLUB.INTERNAL,cn=GSSAPI,cn=auth" + "cn=ceod,dc=csclub,dc=internal" +authz-regexp "uid=ldap/auth2.csclub.internal,cn=CSCLUB.INTERNAL,cn=GSSAPI,cn=auth" + "cn=ldap-slave,dc=csclub,dc=internal" +authz-regexp "uid=renewal/([^/=]*).csclub.internal,cn=CSCLUB.INTERNAL,cn=GSSAPI,cn=auth" + "cn=renewal,dc=csclub,dc=internal" # map sasl external users to ldap users #authz-regexp "cn=ldap[0-9].csclub.uwaterloo.ca,ou=computer science club,o=university of waterloo,st=ontario,c=ca" diff --git a/auth1/main.yml b/auth1/main.yml index 1447270..c2bf563 100644 --- a/auth1/main.yml +++ b/auth1/main.yml @@ -2,6 +2,7 @@ - hosts: auth1 vars: ldap_base: "{{ base_domain.split('.') | map('regex_replace', '^(.*)$', 'dc=\\1') | join(',') }}" + krb_realm: "{{ base_domain.upper() }}" tasks: - name: setup networking import_role: @@ -9,6 +10,7 @@ vars: ipv4_addr: "{{ auth1_ipv4_addr }}" - meta: flush_handlers + # LDAP - name: install LDAP packages apt: name: "{{ item }}" @@ -17,7 +19,6 @@ - ldap-utils - ldapvi - libnss-ldapd - - libpam-ldapd - nscd - sudo-ldap - name: copy slapd.conf @@ -50,14 +51,6 @@ - rfc2307bis.schema - csc.schema notify: restart slapd - - name: copy DB_CONFIG - copy: - remote_src: yes - src: /usr/share/slapd/DB_CONFIG - dest: /var/lib/ldap/DB_CONFIG - owner: openldap - group: openldap - notify: restart slapd - name: make sure slapd is running systemd: name: slapd @@ -75,6 +68,14 @@ shell: rm /var/lib/ldap/* when: cn_config_cmd.rc == 0 notify: restart slapd + - name: copy DB_CONFIG + copy: + remote_src: yes + src: /usr/share/slapd/DB_CONFIG + dest: /var/lib/ldap/DB_CONFIG + owner: openldap + group: openldap + notify: restart slapd - name: copy ldap.conf template: src: ldap/ldap.conf.j2 @@ -103,8 +104,116 @@ src: ldap/data.ldif.j2 dest: /etc/ldap/data.ldif - name: load LDIF data - command: ldapadd -c -f /etc/ldap/data.ldif -Y EXTERNAL -H ldapi:/// - ignore_errors: yes + shell: ldapadd -c -f /etc/ldap/data.ldif -Y EXTERNAL -H ldapi:/// || true + # Kerberos 
+ - name: install Kerberos packages + apt: + name: "{{ item }}" + loop: + - krb5-admin-server + - krb5-user + - libpam-krb5 + - libsasl2-modules-gssapi-mit + - sasl2-bin + - name: override systemd services for Kerberos + import_role: + name: ../roles/systemd_workarounds + vars: + services: [ "krb5-admin-server", "krb5-kdc" ] + - name: copy krb5.conf + template: + src: kerberos/krb5.conf.j2 + dest: /etc/krb5.conf + notify: + - restart kadmin + - name: copy kdc.conf + template: + src: kerberos/kdc.conf.j2 + dest: /etc/krb5kdc/kdc.conf + notify: + - restart kdc + - name: copy kadm5.acl + copy: + src: kerberos/kadm5.acl + dest: /etc/krb5kdc/kadm5.acl + notify: + - restart kdc + - name: create new realm + command: + cmd: krb5_newrealm + # This is the KDC database master key + stdin: | + krb5 + krb5 + creates: /var/lib/krb5kdc/principal + - meta: flush_handlers + - name: add sysadmin principal + command: + cmd: kadmin.local + stdin: | + addprinc sysadmin/admin + krb5 + krb5 + - name: add user principals + command: + cmd: kadmin.local + stdin: | + addprinc {{ item }} + krb5 + krb5 + loop: + - ctdalek + - regular1 + # TODO: add more hosts + - name: add host principals + command: + cmd: kadmin.local + stdin: | + addprinc -randkey host/auth1.{{ base_domain }} + addprinc -randkey ldap/auth1.{{ base_domain }} + # TODO: create an Ansible role for this + - name: copy keytab to host + command: + cmd: kadmin.local + stdin: | + ktadd host/auth1.{{ base_domain }} + ktadd ldap/auth1.{{ base_domain }} + - name: create keytab group + group: + name: keytab + - name: allow users in keytab group to read keytab + file: + path: /etc/krb5.keytab + group: keytab + mode: 0640 + - name: add openldap user to necessary groups + user: + name: openldap + groups: + - keytab + - sasl + notify: + - restart slapd + - name: create /usr/lib/sasl2/slapd.conf + copy: + content: | + mech_list: plain login gssapi external + pwcheck_method: saslauthd + dest: /usr/lib/sasl2/slapd.conf + notify: + - restart 
slapd + - name: add config for saslauthd + replace: + path: /etc/default/saslauthd + regexp: "^{{ item.key }}=.*$" + replace: "{{ item.key }}={{ item.value }}" + loop: + - key: START + value: 'yes' + - key: MECHANISMS + value: '"kerberos5"' + notify: + - restart saslauthd handlers: - name: restart slapd systemd: @@ -118,3 +227,15 @@ systemd: name: nscd state: restarted + - name: restart kadmin + systemd: + name: krb5-admin-server + state: restarted + - name: restart kdc + systemd: + name: krb5-kdc + state: restarted + - name: restart saslauthd + systemd: + name: saslauthd + state: restarted diff --git a/dns/main.yml b/dns/main.yml index 225f7ce..34da500 100644 --- a/dns/main.yml +++ b/dns/main.yml @@ -16,6 +16,7 @@ copy: content: | {{ mail_ipv4_addr }} mail.{{ base_domain }} + {{ auth1_ipv4_addr }} auth1.{{ base_domain }} dest: /etc/dnsmasq_hosts notify: restart dnsmasq - name: add dnsmasq config diff --git a/dns/templates/dnsmasq.conf.j2 b/dns/templates/dnsmasq.conf.j2 index fac609d..fb6031a 100644 --- a/dns/templates/dnsmasq.conf.j2 +++ b/dns/templates/dnsmasq.conf.j2 @@ -9,3 +9,6 @@ cname=mailman.{{ base_domain }},mail.{{ base_domain }} mx-host={{ base_domain }},mail.{{ base_domain }},50 address=/coffee.{{ base_domain }}/{{ coffee_ipv4_addr }} address=/auth1.{{ base_domain }}/{{ auth1_ipv4_addr }} +cname=ldap1.{{ base_domain }},auth1.{{ base_domain }} +cname=kdc1.{{ base_domain }},auth1.{{ base_domain }} +cname=kadmin.{{ base_domain }},auth1.{{ base_domain }} diff --git a/roles/systemd_workarounds/tasks/main.yml b/roles/systemd_workarounds/tasks/main.yml index 59c88d9..cd780bc 100644 --- a/roles/systemd_workarounds/tasks/main.yml +++ b/roles/systemd_workarounds/tasks/main.yml @@ -14,6 +14,9 @@ ProtectHome=false ProtectControlGroups=false ProtectKernelModules=false + InaccessibleDirectories= + ReadOnlyDirectories= + ReadWriteDirectories= dest: "/etc/systemd/system/{{ item }}.service.d/override.conf" loop: "{{ services }}" register: service_overrides