Ansible – KeepAliveD

In this post I am creating a fictitious tenant using a vars file, which looks like the one below.

/tenant_1.yml
---
tenant_name: tenant_1
config_forward_rules_allow_spec: false

tenant_subnets:
  - { tenant_subnet: '192.168.70.0/24' }  # Web
  - { tenant_subnet: '192.168.71.0/24' }  # App
  - { tenant_subnet: '192.168.72.0/24' }  # DB

tenant_vips:
  - 10.10.10.100
  - 10.10.10.101
  - 10.10.10.102
  - 10.10.10.103
  - 10.10.10.104

# Applied on router(s) forward rules
forward_rules_allow_spec:
#  - { protocol: 'tcp', port: '8080', source: '0.0.0.0/0', destination: '192.168.70.0/24' }
#  - { protocol: 'tcp', port: '22', source: '0.0.0.0/0', destination: '192.168.71.0/24' }
#  - { protocol: 'tcp', port: '22', source: '0.0.0.0/0', destination: '192.168.72.0/24' }

forward_rules_allow_gen:
  - { source: '192.168.70.0/24', destination: '0.0.0.0/0' }
  - { source: '192.168.71.0/24', destination: '0.0.0.0/0' }
  - { source: '192.168.72.0/24', destination: '0.0.0.0/0' }

forward_rules_out_drop:
  - { source: '192.168.70.0/24', destination: '192.168.71.0/24' }
  - { source: '192.168.70.0/24', destination: '192.168.72.0/24' }
  - { source: '192.168.71.0/24', destination: '192.168.70.0/24' }
  - { source: '192.168.71.0/24', destination: '192.168.72.0/24' }
  - { source: '192.168.72.0/24', destination: '192.168.70.0/24' }
  - { source: '192.168.72.0/24', destination: '192.168.71.0/24' }

backend_rules:

# Load Balancer Setup

lb_details:
  - { name: 'web', protocol: 'tcp', listen_port: '80', tenant_vip: '10.10.10.100' } 
  - { name: 'db', protocol: 'tcp', listen_port: '3306', tenant_vip: '10.10.10.100' }

lb_defs:
  - { lb_def_name: 'web', protocol: 'tcp', listen_port: '80', tenant_vip: '10.10.10.100', lb_group: 'web', server: 'ans-cloud-web01', backend_port: '80' }
  - { lb_def_name: 'web', protocol: 'tcp', listen_port: '80', tenant_vip: '10.10.10.100', lb_group: 'web', server: 'ans-cloud-web02', backend_port: '80' }
  - { lb_def_name: 'web', protocol: 'tcp', listen_port: '80', tenant_vip: '10.10.10.100', lb_group: 'web', server: 'ans-cloud-web03', backend_port: '80' }
  - { lb_def_name: 'db', protocol: 'tcp', listen_port: '3306', tenant_vip: '10.10.10.100', lb_group: 'db', server: 'ans-cloud-db01', backend_port: '3306' }
  - { lb_def_name: 'db', protocol: 'tcp', listen_port: '3306', tenant_vip: '10.10.10.100', lb_group: 'db', server: 'ans-cloud-db02', backend_port: '3306' }

The goal of this is to dynamically create a KeepAliveD failover configuration between two Ubuntu servers functioning as routers and load balancers.
I will be using the following playbook to generate the KeepAliveD configuration.

---
- hosts: all
  vars_files:
    - ../vars/tenant_1.yml
  tasks:
  - name: tenant_configs | config | setting up keepalived vips
    template: src=../templates/etc/keepalived/keepalived.conf.j2 dest=/etc/keepalived/keepalived.conf owner=root group=root mode=0644
    run_once: true
    notify: restart keepalived
  handlers:
  - name: restart keepalived
    service: name=keepalived state=restarted

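For the relative paths in the playbook to resolve, the playbook is assumed to live in its own directory next to the vars and templates directories, roughly like this (the playbooks directory and the playbook file name are my guesses, not part of the original setup):

playbooks/keepalived.yml                         # the playbook above (name is a guess)
vars/tenant_1.yml                                # the tenant vars file
templates/etc/keepalived/keepalived.conf.j2      # the Jinja2 template shown next
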
The playbook will use the following template to actually generate the keepalived.conf file.

# {{ ansible_managed }}

#### Below is for managing the whole stack involved in Quagga HA
vrrp_script chk_zebra {
   script "killall -0 zebra"   # verify the pid existance
   interval 2                    # check every 2 seconds
   weight 2                      # add 2 points of prio if OK
}

vrrp_instance Quagga {
  state MASTER
  interface {{ keepalived_vip_int }}
  virtual_router_id {{ keepalived_router_id }}
  priority {{ keepalived_router_pri }}
  advert_int 1
  virtual_ipaddress {
    {{ keepalived_vip }}
  }
  virtual_ipaddress_excluded {
{% for vip in tenant_vips %}
          {{ vip }}
{% endfor %}
  }
  notify_master {{ notify_master_script }}
  notify_backup {{ notify_backup_script }}
}
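
Note that the template also leans on a handful of per-host variables that are not defined in the tenant vars file above: keepalived_vip_int, keepalived_router_id, keepalived_router_pri, keepalived_vip, notify_master_script and notify_backup_script. A minimal host_vars sketch for the router pair, assuming the values from the rendered output below (the hostnames and the standby priority are placeholders), could look like this:

# host_vars/router01.yml (primary)
keepalived_vip_int: eth0
keepalived_router_id: 23
keepalived_router_pri: 101
keepalived_vip: 10.10.10.4
notify_master_script: /opt/scripts/master.sh
notify_backup_script: /opt/scripts/backup.sh

# host_vars/router02.yml (standby - lower priority, takes over the VIPs only on failover)
keepalived_vip_int: eth0
keepalived_router_id: 23
keepalived_router_pri: 100
keepalived_vip: 10.10.10.4
notify_master_script: /opt/scripts/master.sh
notify_backup_script: /opt/scripts/backup.sh

The important bit for failover is that keepalived_router_pri differs between the two routers while keepalived_router_id and the virtual addresses stay the same.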

And what you end up with is shown below.

#### Below is for managing the whole stack involved in Quagga HA
vrrp_script chk_zebra {
   script "killall -0 zebra"   # verify the pid existance
   interval 2                    # check every 2 seconds
   weight 2                      # add 2 points of prio if OK
}

vrrp_instance Quagga {
  state MASTER
  interface eth0
  virtual_router_id 23
  priority 101
  advert_int 1
  virtual_ipaddress {
    10.10.10.4
  }
  virtual_ipaddress_excluded {
          10.10.10.100
          10.10.10.101
          10.10.10.102
          10.10.10.103
          10.10.10.104
  }
  notify_master /opt/scripts/master.sh
  notify_backup /opt/scripts/backup.sh
}

And there you have it. You can now build out your KeepAliveD configurations.
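
If the playbook is saved as, say, keepalived.yml (both the playbook and inventory file names here are placeholders), rolling it out and checking that the VIPs landed on the active router is a couple of commands:

ansible-playbook -i inventory keepalived.yml

# on the active router the VIPs should now be attached to eth0
ip addr show eth0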

The vars file above is the actual complete group_vars file, which will be used for this post and future posts based on this same scenario.

Enjoy!
