# Installing and configuring master/child Bind DNS on head nodes

Domain Name System (DNS) is a distributed database system that associates hostnames with their respective IP addresses. Berkeley Internet Name Domain (BIND) is a set of DNS-related programs; in Red Hat Enterprise Linux it runs as the named service. The main BIND configuration file is /etc/named.conf. This section focuses on installing, configuring, and managing BIND on all head nodes to create the master and child DNS configuration.

Prerequisites:

  • All hosts must be running RHEL 8.6 OS.

  • Red Hat subscription must be active.

  • DNS server must be configured with the following settings:

TABLE 9 DNS server settings

| DNS server settings | Description                           |
| ------------------- | ------------------------------------- |
| isv.local           | Zone (domain name)                    |
| 172.28.230.11       | IP address of the master DNS node     |
| 172.28.230.12       | IP address of the 1st child DNS node  |
| 172.28.230.13       | IP address of the 2nd child DNS node  |

## Installing and configuring Bind DNS service on the master node

To install and configure the Bind DNS service on the master node:

  1. Install the Bind DNS service on the master node with the following command:
$ yum -y install bind bind-utils vim
  2. Configure BIND DNS on the master node by editing the /etc/named.conf configuration file:

options {

listen-on port 53 { 127.0.0.1; 172.28.230.11; };

listen-on-v6 port 53 { ::1; };

directory  "/var/named";

dump-file "/var/named/data/cache_dump.db";

statistics-file "/var/named/data/named_stats.txt";

memstatistics-file "/var/named/data/named_mem_stats.txt";

secroots-file "/var/named/data/named.secroots";

recursing-file "/var/named/data/named.recursing";

allow-query  { localhost; 172.28.230.0/24; };

allow-transfer { localhost; 172.28.230.12; 172.28.230.13; };

recursion no;

dnssec-enable yes;

dnssec-validation yes;

managed-keys-directory "/var/named/dynamic";

pid-file "/run/named/named.pid";

session-keyfile "/run/named/session.key";

include "/etc/crypto-policies/back-ends/bind.config";

};

logging {

channel default_debug {

file "data/named.run";

severity dynamic;

           };

};

zone "." IN {

type hint;

file "named.ca";

};

zone "isv.local" IN {

type master;

file "forward.isv";

allow-update { none; };

};

zone "230.28.172.in-addr.arpa" IN {

type master;

file "reverse.isv";

allow-update { none; };

};

include "/etc/named.rfc1912.zones";

include "/etc/named.root.key";
  3. Create the zone files. After defining the zones in the named.conf configuration file, create the zone files in the /var/named/ directory and add all the records that you want, such as A/AAAA, MX, PTR, and so on. For example:

[root@kvm1 ~]$ cat /var/named/forward.isv

$TTL 86400

@     IN SOA kvm1.isv.local. root.isv.local. (

2011071001 ; serial

300 ; refresh

1800 ; retry

604800 ; expire

86400 ) ; minimum

@      IN     NS      kvm1.isv.local.

@      IN     NS      kvm2.isv.local.

@      IN     NS      kvm3.isv.local.

@      IN      A       172.28.230.11

@      IN      A       172.28.230.12

@      IN      A       172.28.230.13

boot.ocp.isv.local.           IN     A     172.28.230.21

master0.ocp.isv.local.     IN     A     172.28.230.22

master1.ocp.isv.local.     IN     A     172.28.230.23

master2.ocp.isv.local.     IN     A     172.28.230.24

haproxy.ocp.isv.local.     IN     A     172.28.230.25

api.int.ocp.isv.local.        IN     A     172.28.230.25

api.ocp.isv.local.             IN     A     172.28.230.25

worker1.ocp.isv.local.     IN     A     172.28.230.26

worker2.ocp.isv.local.    IN     A     172.28.230.27

worker3.ocp.isv.local.    IN     A     172.28.230.28

*.apps.ocp.isv.local.      IN     A     172.28.230.25
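Before loading the zone, the file can optionally be validated with the named-checkzone utility from the bind package; on success it reports the loaded serial number followed by OK. A minimal check:

$ named-checkzone isv.local /var/named/forward.isv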
  4. Create the corresponding reverse records for the same domain that were defined in the named.conf configuration file:
[root@kvm1 ~]$ cat /var/named/reverse.isv

$TTL 86400

@     IN SOA kvm1.isv.local. root.isv.local. (

2011071001 ; serial

300 ; refresh

1800 ; retry

604800 ; expire

86400 ) ; minimum

@    IN    NS     kvm1.isv.local.

@    IN    NS     kvm2.isv.local.

@    IN    NS     kvm3.isv.local.

@    IN    PTR    isv.local.

@    IN    A      172.28.230.11

@    IN    A      172.28.230.12

@    IN    A      172.28.230.13

11   IN    PTR    kvm1.isv.local.

12   IN    PTR    kvm2.isv.local.

13   IN    PTR    kvm3.isv.local.

21   IN    PTR    boot.ocp.isv.local.

22   IN    PTR    master0.ocp.isv.local.

23   IN    PTR    master1.ocp.isv.local.

24   IN    PTR    master2.ocp.isv.local.

25   IN    PTR    haproxy.ocp.isv.local.

25   IN    PTR    api.int.ocp.isv.local.

25   IN    PTR    api.ocp.isv.local.

26   IN    PTR    worker1.ocp.isv.local.

27   IN    PTR    worker2.ocp.isv.local.

28   IN    PTR    worker3.ocp.isv.local.
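The reverse zone can be validated the same way against its in-addr.arpa name; a minimal check with named-checkzone:

$ named-checkzone 230.28.172.in-addr.arpa /var/named/reverse.isv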
  5. Modify the DNS settings on the master server to set the new DNS server as the default name server. Edit the /etc/resolv.conf configuration file and add the following lines:

NOTE

Replace the IP address to match your environment.

[root@kvm1 ~]$ cat /etc/resolv.conf

search isv.local

nameserver 172.28.230.11
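On RHEL 8, NetworkManager may regenerate /etc/resolv.conf on reboot and overwrite these entries. As a precaution, the same settings can be applied to the active connection profile; a sketch assuming a hypothetical connection named bridge0.230 (list your own with nmcli connection show):

$ nmcli connection modify bridge0.230 ipv4.dns 172.28.230.11 ipv4.dns-search isv.local

$ nmcli connection up bridge0.230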
  6. Configure the firewall to allow the DNS service:
$ sudo firewall-cmd --add-service=dns --permanent

$ sudo firewall-cmd --reload

  7. Check the configuration, then start and enable BIND:
$ sudo named-checkconf

$ sudo systemctl start named

$ sudo systemctl enable named
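Before configuring the child nodes, it is worth confirming that named is listening on the expected address and answering queries; a quick check using a record from the forward zone created earlier:

$ ss -tulnp | grep :53

$ dig @127.0.0.1 +short master0.ocp.isv.local   # expected answer: 172.28.230.22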

The master BIND DNS server configuration is complete.

## Installing Bind DNS service on child nodes

To install the Bind DNS service on child nodes:

  1. On both child nodes (172.28.230.12 and 172.28.230.13), install bind and bind-utils:
$ yum -y install bind bind-utils vim
  2. Configure the child nodes by editing the /etc/named.conf configuration file. On each child node, include that node's own IP address in the listen-on directive:
options {

listen-on port 53 { 127.0.0.1; 172.28.230.12; 172.28.230.13; };

listen-on-v6 port 53 { ::1; };

directory "/var/named";

dump-file "/var/named/data/cache_dump.db";

statistics-file "/var/named/data/named_stats.txt";

memstatistics-file "/var/named/data/named_mem_stats.txt";

secroots-file "/var/named/data/named.secroots";

recursing-file "/var/named/data/named.recursing";

allow-query { localhost; 172.28.230.0/24; };

recursion yes;

dnssec-enable yes;

dnssec-validation yes;

managed-keys-directory "/var/named/dynamic";

pid-file "/run/named/named.pid";

session-keyfile "/run/named/session.key";

include "/etc/crypto-policies/back-ends/bind.config";

};

logging {

channel default_debug {

file "data/named.run";

severity dynamic;

};

};

zone "." IN {

type hint;

file "named.ca";

};

zone "isv.local" IN {

type slave;

file "slaves/forward.isv";

masters { 172.28.230.11; };

};

zone "230.28.172.in-addr.arpa" IN {

type slave;

file "slaves/reverse.isv";

masters { 172.28.230.11; };

};

include "/etc/named.rfc1912.zones";

include "/etc/named.root.key";
  3. Modify the DNS settings on the child servers to set the new DNS servers (the master and both child nodes) as the default name servers. Edit the /etc/resolv.conf configuration file and add the following lines:

[root@kvm2 ~]$ cat /etc/resolv.conf

search isv.local

nameserver 172.28.230.11

nameserver 172.28.230.12

nameserver 172.28.230.13

NOTE

Replace the IP addresses to match your environment.

  4. Check the configurations and then start and enable BIND:
$ sudo named-checkconf

$ sudo systemctl start named

$ sudo systemctl enable named
  5. Verify that the /var/named/slaves directory includes the zone files that were transferred from the master node:
[root@kvm2 ~]$ ls -l /var/named/slaves/

-rw-r--r-- 1 named named  900 Jun 23 09:11 forward.isv

-rw-r--r-- 1 named named 1168 Jun 23 09:12 reverse.isv

[root@kvm3 ~]$ ls -l /var/named/slaves/

-rw-r--r-- 1 named named  900 Jun 23 09:11 forward.isv

-rw-r--r-- 1 named named 1168 Jun 23 09:12 reverse.isv
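Zone transfers normally occur automatically when named starts on the child nodes. If the slave zone files shown above do not appear, a transfer can be forced and the logs inspected; a sketch using the standard rndc and journalctl tools:

$ sudo rndc retransfer isv.local

$ sudo rndc retransfer 230.28.172.in-addr.arpa

$ journalctl -u named | grep -i transfer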

## Testing master/child DNS configuration

To test DNS resolution from the master and child DNS servers:

  1. On the master DNS node, run the following commands:
[root@kvm1~] $ dig +short master0.ocp.isv.local

172.28.230.22

[root@kvm1 ~]$ dig +short -x 172.28.230.22

master0.ocp.isv.local.
  2. On both child DNS servers, run the following commands:
  • 1st child DNS node:
[root@kvm2~] $ dig +short master0.ocp.isv.local

172.28.230.22

[root@kvm2 ~]$ dig +short -x 172.28.230.22

master0.ocp.isv.local.
  • 2nd Child DNS node:
[root@kvm3~] $ dig +short master0.ocp.isv.local

172.28.230.22

[root@kvm3~] $ dig +short -x 172.28.230.22

master0.ocp.isv.local.
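To spot-check all of the records at once, a short shell loop over the hostnames defined in the forward zone can be run against any of the three DNS servers; a minimal sketch:

for host in boot master0 master1 master2 haproxy api api.int worker1 worker2 worker3; do
    # print each name alongside the address the DNS server returns
    echo -n "${host}.ocp.isv.local -> "
    dig +short "${host}.ocp.isv.local" @172.28.230.11
done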

## Configuring load balancer on head nodes

RHOCP 4.10 uses an external load balancer so that clients outside the cluster can communicate with services running inside the cluster. This section assumes that a load balancer is available within the deployment environment. Our deployment uses HAProxy, an open-source solution, with one virtual machine providing the load-balancing functionality.

In a production environment, Hewlett Packard Enterprise recommends the use of enterprise load balancers such as F5 Networks BIG-IP and its associated products.

To configure the load balancer on all head nodes:

  1. Install, enable, and start the HAProxy service on all head nodes (the Keepalived service is installed in a later section):
$ yum install haproxy

$ systemctl enable haproxy

$ systemctl start haproxy
  2. Edit the /etc/haproxy/haproxy.cfg configuration file on all head nodes:
frontend openshift-api-server

    bind *:6443

    default_backend openshift-api-server

    mode tcp

    option tcplog

backend openshift-api-server

    balance source

    mode tcp

        server ocpboot1 boot.ocp.isv.local:6443 check

        server ocpmaster1 master0.ocp.isv.local:6443 check

        server ocpmaster2 master1.ocp.isv.local:6443 check

        server ocpmaster3 master2.ocp.isv.local:6443 check

frontend machine-config-server

    bind *:22623

    default_backend machine-config-server

    mode tcp

    option tcplog

backend machine-config-server

    balance source

    mode tcp

        server ocpboot1 boot.ocp.isv.local:22623 check

        server ocpmaster1 master0.ocp.isv.local:22623 check

        server ocpmaster2 master1.ocp.isv.local:22623 check

        server ocpmaster3 master2.ocp.isv.local:22623 check

frontend ingress-http

    bind *:80

    default_backend ingress-http

    mode tcp

    option tcplog

backend ingress-http

    balance source

    mode tcp

        server ocpworker1 worker1.ocp.isv.local:80 check

        server ocpworker2 worker2.ocp.isv.local:80 check

        server ocpworker3 worker3.ocp.isv.local:80 check

# If creating a cluster with only master nodes initially and adding the worker nodes later, add the master nodes in this section instead of the worker nodes. After all worker nodes have joined the cluster, update this configuration to use the worker nodes.

    # server ocpmaster01 master0.ocp.isv.local:80 check

    # server ocpmaster02 master1.ocp.isv.local:80 check

    # server ocpmaster03 master2.ocp.isv.local:80 check

frontend ingress-https

    bind *:443

    default_backend ingress-https

    mode tcp

    option tcplog

backend ingress-https

    balance source

    mode tcp

        server ocpworker1 worker1.ocp.isv.local:443 check

        server ocpworker2 worker2.ocp.isv.local:443 check

        server ocpworker3 worker3.ocp.isv.local:443 check

# If creating a cluster with only master nodes initially and adding the worker nodes later, add the master nodes in this section instead of the worker nodes. After all worker nodes have joined the cluster, update this configuration to use the worker nodes.

    # server ocpmaster01 master0.ocp.isv.local:443 check

    # server ocpmaster02 master1.ocp.isv.local:443 check

    # server ocpmaster03 master2.ocp.isv.local:443 check

NOTE

The load balancer configuration must contain values that match the installation environment.
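Before relying on the load balancer, the configuration can be syntax-checked and the balanced ports opened in the firewall. A sketch, assuming firewalld is active and SELinux is enforcing (the haproxy_connect_any boolean lets HAProxy bind and connect on the non-standard ports 6443 and 22623):

$ haproxy -c -f /etc/haproxy/haproxy.cfg

$ sudo setsebool -P haproxy_connect_any on

$ sudo firewall-cmd --permanent --add-port=6443/tcp --add-port=22623/tcp

$ sudo firewall-cmd --permanent --add-service=http --add-service=https

$ sudo firewall-cmd --reload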

## Installing Keepalived service for HAProxy servers

The Keepalived service provides high-availability capabilities on the load balancer for the HAProxy server configuration.

To install the Keepalived service on all three head nodes:

  1. Install the Keepalived and psmisc packages.

NOTE

The psmisc package provides the killall utility, which is used to check the HAProxy process for VRRP.

$ yum install -y keepalived psmisc
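The killall -0 form sends signal 0, which tests for the existence of the haproxy process without actually signaling it; Keepalived uses the command's exit status in its vrrp_script check. A quick illustration of the behavior:

$ killall -0 haproxy && echo "haproxy is running" || echo "haproxy is not running"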
  2. Determine the interface to use with the services. For example, in the following output, the bridge0.230 interface is used with the services on our load balancer for HAProxy servers:
[root@kvm3 slaves] $ ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

       valid_lft forever preferred_lft forever

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: ens2: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond0 state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

3: ens3: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond0 state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff permaddr 88:e9:a4:40:f6:d8

4: bridge0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

    inet6 fe80::8034:bdc:f7d0:b444/64 scope link noprefixroute

       valid_lft forever preferred_lft forever

5: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue master bridge0 state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

6: bridge0.230: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

    inet 172.28.230.13/24 brd 172.28.230.255 scope global noprefixroute bridge0.230

NOTE

This interface can be a dedicated interface or a shared interface. It must be on the same broadcast domain as the VIP.

  3. Allow all connections from the head node IP addresses so that HAProxy traffic is accepted:
$ iptables -A INPUT -s 172.28.230.11 -j ACCEPT

$ iptables -A INPUT -s 172.28.230.12 -j ACCEPT

$ iptables -A INPUT -s 172.28.230.13 -j ACCEPT
  4. Allocate an IP address from the existing HAProxy network for the floating IP address (VIP) of the load balancers.

  5. Update the existing cluster DNS name to the newly assigned IP address.

  6. Generate a random password for the Keepalived AUTH_PASS:

$ uuidgen

f30fedc5-2b19-414f-b67e-f05742a82e78

## Configuring Keepalived service for each node

To configure the Keepalived service on each node:

  1. Edit the /etc/keepalived/keepalived.conf configuration file on all three nodes for the active/active VIP:
[root@kvm3 slaves]# vi /etc/keepalived/keepalived.conf

global_defs {

 router_id ovp_vrrp

}

vrrp_script haproxy_check {

 script "killall -0 haproxy"

 interval 2

 weight 2

}

vrrp_instance OCP_EXT {

 interface bridge0.230

 virtual_router_id 51

 priority 100

 state MASTER

 virtual_ipaddress {

 172.28.230.25 dev bridge0.230

 }

 track_script {

 haproxy_check

 }

 authentication {

 auth_type PASS

 auth_pass 1cee4b6e-2cdc-48bf-83b2-01a96d1593e4

 }

}

The keepalived configuration file consists of the following parts:

  • state MASTER: Designates this node as the primary HAProxy server.

  • priority: Determines the priority of the server configuration. If a master server must be elected, the server with the highest priority is selected.

  • virtual_ipaddress: The IP address to use as the floating VIP and the local device to bind it to; in this example, 172.28.230.25 on the bridge0.230 interface.
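The listing above configures the MASTER node. On the remaining two head nodes, a common pattern (a sketch under assumptions, not taken verbatim from this deployment) is to set state BACKUP with a lower priority so that failover is deterministic; the interface, virtual_router_id, and authentication settings must match the master, and the vrrp_script block is the same on all nodes:

vrrp_instance OCP_EXT {
 interface bridge0.230
 virtual_router_id 51
 priority 99                # lower than the MASTER node (100)
 state BACKUP
 virtual_ipaddress {
 172.28.230.25 dev bridge0.230
 }
 track_script {
 haproxy_check
 }
 authentication {
 auth_type PASS
 auth_pass 1cee4b6e-2cdc-48bf-83b2-01a96d1593e4
 }
}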

  2. Start and enable the services on all head nodes:
$ systemctl enable keepalived; systemctl start keepalived
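If firewalld is active on the head nodes, VRRP advertisements (IP protocol 112) must be allowed between them; otherwise each node assumes the master role and claims the VIP. A minimal sketch using a firewalld rich rule:

$ sudo firewall-cmd --permanent --add-rich-rule='rule protocol value="vrrp" accept'

$ sudo firewall-cmd --reload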

## Testing HAProxy with Keepalived service

After successful deployment of the Keepalived service on the head nodes, traffic is routed through HAProxy on the node that holds the VRRP VIP. Verify that the VIP (172.28.230.25) is assigned to one of the head nodes:

[root@kvm3 slaves] $ ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000

    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

    inet 127.0.0.1/8 scope host lo

       valid_lft forever preferred_lft forever

    inet6 ::1/128 scope host

       valid_lft forever preferred_lft forever

2: ens2: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond0 state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

3: ens3: <BROADCAST,MULTICAST,SLAVE,UP,LOWER_UP> mtu 1500 qdisc mq master bond0 state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff permaddr 88:e9:a4:40:f6:d8

4: bridge0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

    inet6 fe80::8034:bdc:f7d0:b444/64 scope link noprefixroute

       valid_lft forever preferred_lft forever

5: bond0: <BROADCAST,MULTICAST,MASTER,UP,LOWER_UP> mtu 1500 qdisc noqueue master bridge0 state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

6: bridge0.230: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000

    link/ether 88:e9:a4:41:55:b8 brd ff:ff:ff:ff:ff:ff

    inet 172.28.230.13/24 brd 172.28.230.255 scope global noprefixroute bridge0.230

       valid_lft forever preferred_lft forever

    inet 172.28.230.25/32 scope global bridge0.230

       valid_lft forever preferred_lft forever

    inet6 fe80::c48b:8f0f:9902:b9a1/64 scope link noprefixroute

       valid_lft forever preferred_lft forever
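To exercise failover, stop HAProxy on the node that currently holds the VIP; the killall -0 check in haproxy_check then fails and Keepalived releases the VIP to the node with the next-highest priority. A minimal sketch of the test:

# On the node that currently holds 172.28.230.25:
$ sudo systemctl stop haproxy

# On another head node, the VIP should appear within a few seconds:
$ ip a show bridge0.230 | grep 172.28.230.25

# Restore the original state:
$ sudo systemctl start haproxy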