# Deploying Squid proxy on head nodes

Squid is a proxy server that caches content to reduce bandwidth usage and load web pages more quickly. This section describes how to set up Squid as a proxy for the HTTP, HTTPS, and FTP protocols, and how to configure authentication and restrict access.

Prerequisites:

  • The Keepalived service must be available

To deploy Squid proxy server on head nodes:

  1. Install the Squid package on all head nodes.
$ yum install squid
  2. Edit the /etc/squid/squid.conf configuration file:
# Recommended minimum configuration:

# Example rule allowing access from your local networks.
# Adapt to list your (internal) IP networks from where browsing
# should be allowed
#acl localnet src 172.0.0.0/8   # RFC1918 possible internal network
acl localnet src {{ localnet }}        # RFC1918 possible internal network
#acl localnet src 192.168.0.0/16        # RFC1918 possible internal network
acl localnet src fc00::/7       # RFC 4193 local private network range
acl localnet src fe80::/10      # RFC 4291 link-local (directly plugged) machines

acl SSL_ports port 443
acl Safe_ports port 210         # wais
acl Safe_ports port 1025-65535  # unregistered ports
acl Safe_ports port 280         # http-mgmt
acl Safe_ports port 488         # gss-http
acl Safe_ports port 591         # filemaker
acl Safe_ports port 777         # multiling http
acl CONNECT method CONNECT

# Recommended minimum Access Permission configuration:
#
# Only allow cachemgr access from localhost
http_access allow localhost manager
http_access deny manager

# Deny requests to certain unsafe ports
http_access deny !Safe_ports

# Deny CONNECT to other than secure SSL ports
http_access deny CONNECT !SSL_ports

# We strongly recommend the following be uncommented to protect innocent
# web applications running on the proxy server who think the only
# one who can access services on "localhost" is a local user
#http_access deny to_localhost

#
# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
#
# Example rule allowing access from your local networks.
# Adapt localnet in the ACL section to list your (internal) IP networks
# from where browsing should be allowed
http_access allow localnet
http_access allow localhost

# And finally deny all other access to this proxy
cache_peer {{ corporate_proxy }} parent {{ corporate_proxy_port }} 0 no-query default
acl all src all
http_access allow localhost
never_direct allow all

# Squid normally listens to port 3128
http_port {{ squid_port }}

# Uncomment the line below to enable disk caching - path format is /cygdrive/<full path to cache folder>, i.e.
#cache_dir aufs /cygdrive/d/squid/cache 3000 16 256

# Leave coredumps in the first cache dir
coredump_dir /var/cache/squid

# Add any of your own refresh_pattern entries above these.
refresh_pattern ^ftp:           1440    20%     10080
refresh_pattern ^gopher:        1440    0%      1440
refresh_pattern -i (/cgi-bin/|\?) 0     0%      0
refresh_pattern .               0       20%     4320

dns_nameservers {{ master_dns }} {{ slave1_dns }} {{ slave2_dns }}
max_filedescriptors 3200
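
The introduction mentions authentication and restricting access, which the configuration above does not enable. The following is a minimal sketch, assuming the NCSA basic-authentication helper shipped with the RHEL squid package and a password file created with htpasswd (from the httpd-tools package); the ACL name and password file path are illustrative only:

# Hypothetical basic-authentication and access-restriction additions.
# Note: Squid evaluates http_access rules in order, so this rule must appear
# before (or replace) the plain "http_access allow localnet" line above.
auth_param basic program /usr/lib64/squid/basic_ncsa_auth /etc/squid/passwd
auth_param basic realm Squid proxy
acl authenticated proxy_auth REQUIRED
http_access allow localnet authenticated
http_access deny all

After editing squid.conf, the syntax can be checked with squid -k parse, and a running instance can be reloaded with squid -k reconfigure.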

  3. Enable and start the Squid service.
$ systemctl enable --now squid
  4. Open port 3128 in the firewall.
$ firewall-cmd --permanent --add-port=3128/tcp
$ firewall-cmd --reload
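
If {{ squid_port }} is set to a value other than the default 3128, open that port instead. A sketch using the same firewall-cmd syntax:
$ firewall-cmd --permanent --add-port={{ squid_port }}/tcp
$ firewall-cmd --reload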
  5. Edit the /etc/environment configuration file:
FTP_PROXY=http://{{ squid VIP }}:{{ squid_port }}
https_proxy=http://{{ squid VIP }}:{{ squid_port }}
http_proxy=http://{{ squid VIP }}:{{ squid_port }}
no_proxy=localhost,127.0.0.1,{{ squid VIP }}
HTTPS_PROXY=http://{{ squid VIP }}:{{ squid_port }}
ftp_proxy=http://{{ squid VIP }}:{{ squid_port }}
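
The variables in /etc/environment take effect for new login sessions. To use the proxy in the current shell without logging in again, the same values can be exported manually, for example:
$ export http_proxy=http://{{ squid VIP }}:{{ squid_port }}
$ export https_proxy=$http_proxy
$ export no_proxy=localhost,127.0.0.1,{{ squid VIP }}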

  6. Edit the /etc/keepalived/keepalived.conf configuration file:
vrrp_script chk_squid_service {
    script "/usr/sbin/squid -k check"
    interval 3
}

vrrp_instance proxy_ip1 {
    state MASTER
    interface {{ VIP_Interface }}
    virtual_router_id 1
    priority 255
    virtual_ipaddress {
        {{ VIP }}/{{ VIP_Prefix }} dev {{ VIP_Interface }} label {{ VIP_Interface }}:1
    }
    track_script {
        chk_squid_service
    }
}
  7. Restart the Keepalived service.

  8. Restart the Squid service (example commands for steps 7 and 8 are shown below).
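
For example, assuming both services are managed by systemd as in the earlier steps; the curl call is only an illustrative check that the virtual IP answers on the Squid port:
$ systemctl restart keepalived
$ systemctl restart squid
$ ip addr show {{ VIP_Interface }}    # the VIP should be listed with label {{ VIP_Interface }}:1
$ curl -x http://{{ VIP }}:{{ squid_port }} -sI https://www.redhat.com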

# Configuring the cluster-wide proxy during installation

Most production environments deny direct access to the Internet and instead route outbound traffic through an HTTP or HTTPS proxy. To use a proxy when deploying a new RHOCP cluster, configure the proxy settings in the install-config.yaml file.

NOTE

For bare metal installations, if the node IP addresses are not assigned from the range specified in the networking.machineNetwork[].cidr field in the install-config.yaml file, they must be added to the proxy.noProxy field.
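
For example, node addresses that fall outside the machine network CIDR are listed as additional comma-separated entries in proxy.noProxy (the addresses below are placeholders):

proxy:
  noProxy: 10.0.0.1,10.0.0.2,10.0.0.3,172.28.230.100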

Prerequisites:

  • An existing install-config.yaml file must be available
  • If the cluster requires access to certain sites, review those sites and determine whether any of them need to bypass the proxy. By default, all cluster egress traffic is proxied, including calls to hosting cloud provider APIs. Add any sites that must bypass the proxy to the spec.noProxy field of the Proxy object.

To configure the proxy settings of a new RHOCP cluster:

  • Edit the install-config.yaml file and add the following details:

  • baseDomain: Base domain of the DNS that hosts RHOCP.

  • name: Name of the RHOCP cluster. It is the same as the new domain created in DNS.

  • replicas: Update this field to reflect the number of master or worker instances required for the RHOCP cluster, according to the requirements of the installation environment. A minimum of three master nodes and two worker nodes per RHOCP cluster is recommended.

  • clusterNetworks: This field is pre-populated by Red Hat. Update this field only if a custom cluster network is needed.

  • pullSecret: Update this field with the pull secret for the Red Hat account. Log in to the Red Hat account at the following link and retrieve the pull secret:

https://cloud.redhat.com/openshift/install/metal/user-provisioned

  • sshKey: Update this field with the SSH public key of the installer VM, copying the key into the install-config.yaml file. Generate the SSH key with the following command:
$ ssh-keygen
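
The public key to copy can then be displayed, assuming the default key path chosen by ssh-keygen, with:
$ cat ~/.ssh/id_rsa.pub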

The following example install-config.yaml file, located at /opt/NGS-OpenShift/playbooks/roles/generate_ignition_files/ignitions/install-config.yml, can be used as a starting point; update the fields to suit your installation environment:

apiVersion: v1
baseDomain: < name of the base domain >
proxy:
  httpProxy: http://172.28.201.200:3128/
  httpsProxy: http://172.28.201.200:3128/
  noProxy: ".apps.ocp.isv.local,.cluster.local,.hp.com,.hpcloud.net,.hpecorp.net,.localdomain.com,.svc,10.0.0.0/16,10.0.0.1,10.0.0.2,10.0.0.3,10.1.0.0/16,12.128.0.0/14,127.0.0.1,16.110.135.51,16.110.135.52,172.17.0.0/16,172.28.230.0/24,172.28.230.100,172.28.230.101,172.28.230.102,172.28.230.103,172.28.230.105,172.28.230.106,172.28.230.107,172.28.230.108,172.28.230.109,172.28.230.110,172.28.230.111,172.28.230.112,172.28.230.113,172.28.230.114,172.28.230.115,172.30.0.0/16,api,api-int,api-int.ocp.isv.local,api.ocp.isv.local,bootstrap,bootstrap.ocp.isv.local,haproxy.ocp.isv.local,isv.local,localaddress,localhost,master1,master1.ocp.isv.local,master2,master2.ocp.isv.local,master3,master3.ocp.isv.local,resolver.hpecorp.net,worker1,worker1.ocp.isv.local,worker2,worker2.ocp.isv.local,worker3,worker3.ocp.isv.local"
compute:
- hyperthreading: Enabled
  name: worker
  replicas: 2
controlPlane:
  hyperthreading: Enabled
  name: master
  replicas: 3
metadata:
  name: < name of the cluster, same as the new domain under the base domain created >
networking:
  clusterNetworks:
  - cidr: 12.128.0.0/14
    hostPrefix: 23
  networkType: OpenShiftSDN
  serviceNetwork:
  - 172.30.0.0/16
platform:
  none: {}
pullSecret: 'pull secret provided as per the Red Hat account'
sshKey: 'ssh key of the installer VM'

NOTE

The ignition files have a time-out period of 24 hours, so the clusters must be created within 24 hours of generating the ignition files. If more than 24 hours pass, clean up the files from the directory where the ignition files were saved and regenerate them.
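
A sketch of the cleanup and regeneration, assuming the ignition files live in the ignitions directory shown earlier and that the standard openshift-install client is used to regenerate them from install-config.yaml (the exact workflow may instead be driven by the generate_ignition_files playbook role):
$ cd /opt/NGS-OpenShift/playbooks/roles/generate_ignition_files/ignitions/
$ rm -f *.ign
$ openshift-install create ignition-configs --dir=.
Note that openshift-install consumes the install-config.yaml in the target directory, so keep a backup copy of it before regenerating.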