# Sample vars file
A sample vars file, `group_vars/all/vars.yml.sample`, is provided that you can use as a model for your own vars file. To create your vars file, copy the sample to `group_vars/all/vars.yml` (or create that file from scratch) and add entries based on the descriptions in the preceding sections. The sample `vars.yml` file is shown below for convenience.
```yaml
---
#
# OpenShift 4.X artifacts
#
efk_channel: "4.2" # "preview" if OCP 4.1, "4.2" if OCP 4.2 (use quotes)
local_home: "{{ lookup('env','HOME') }}" # Local user's HOME directory
install_dir: "{{ local_home }}/.ocp" # OCP installation dir
ocp_installer_path: '{{ local_home }}/kits/openshift-install' # Path to the OCP installer
ocp_oc_path: '{{ local_home }}/kits/oc' # Path to the oc client
ocp_kubectl_path: '{{ local_home }}/kits/kubectl' # Path to kubectl client
pull_secret: '{{ vault.pull_secret }}' # pull secret
#
# You need a VLAN with transparent access to the Internet
#
vm_portgroup: hpeOpenshift # portgroup that the VMs connect to (must exist)
dhcp_subnet: 10.15.152.0/24 # subnet to use on the above VLAN (see your net admin)
gateway: '10.15.152.1' # gateway for the above subnet (see your net admin)
domain_name: hpecloud.org # you can keep this
#
# vcenter related settings
#
vcenter_hostname: vcentergen10.am2.cloudra.local # name of your vCenter server
vcenter_username: Administrator@vsphere.local # Admin user for your vCenter environment
vcenter_password: '{{ vault.vcenter_password }}' # Password for the above
vcenter_validate_certs: false # setting this to true is not implemented/tested
vcenter_cluster: OCP # Name of your SimpliVity Cluster (must exist)
datacenter: DEVOPS # Name of your DATACENTER (must exist)
datastores: ['Openshift_HPE'] # where to store the VMs (datastore must exist), only one datastore supported
#datastore_size: 1024 # size in GiB of the VM datastore, only applies if the playbook creates the datastore
cluster_name: hpe # Name of the K8S Cluster. A VM folder with the same name is created if needed
ntp_servers: ['10.12.2.1'] # NTP servers in your environment
dns_servers: ['10.10.173.1','10.10.173.31'] # DNS servers in your environment
#
# folders, templates and OVAs; templates are created from the corresponding OVA if, and only if, they cannot be found
#
support_folder: 'hpeSupport' # Folder for non-OCP VMs and templates (created if needed)
master_ova_path: '{{ local_home }}/kits/rhcos.ova' # e.g. rhcos-4.1.0-x86_64-vmware.ova (the file is expected to be there)
worker_ova_path: '{{ master_ova_path }}' # Path to the OVA file used to create the VM template for OCP worker nodes
support_ova_path: '{{ local_home }}/kits/hpe-rhel760.ova' # Path to the OVA file used to create the VM template for support machines (LBs etc)
master_template: hpe-rhcos # VMware template name for OCP master nodes
worker_template: '{{ master_template }}' # VMware template name for OCP worker nodes (same as master nodes by default, i.e. RH CoreOS)
support_template: hpe-rhel760 # VMware template name for non-OCP VMs (such as LBs etc)
#
# passwordless login
#
ssh_key: '{{ vault.ssh_key }}' # ssh public key for all VMs; strictly speaking this is public info and need not be in the vault
#
# Red Hat Network credentials
#
rhn_orgid: '{{ vault.rhn_orgid }}' # Organization ID associated with RHN account
rhn_key: '{{ vault.rhn_key }}' # Account Key associated with RHN account
rhn_user: '{{ vault.rhn_user }}' # Username associated with RHN account
rhn_pass: '{{ vault.rhn_pass }}' # Password associated with RHN account
#
# Load balancer
#
frontend_vm_portgroup: 'extVLAN2968' # Name of the portgroup connected to the access/public network
frontend_gateway: '10.15.156.1' # Access network gateway
loadbalancers:
  apps:
    vip: 10.15.156.9/24 # if omitted, defaults to the internal IP address of the first load balancer (i.e. no VIP, no HA)
  backend:
    vip: 10.15.152.9/24 # if omitted, defaults to the internal IP address of the first load balancer (i.e. no VIP, no HA)
    interface: ens192 # name of the internal/backend interface (ens192 if using RHEL 7.6 and these playbooks)
    vrrp_router_id: 51
  frontend:
    vip: 10.15.156.9/24 # if omitted, defaults to the external IP address of the first load balancer (i.e. no VIP, no HA)
    interface: ens224 # name of the external/frontend interface (ens224 if using RHEL 7.6 and these playbooks)
    vrrp_router_id: 51
#
# Number of NFS shares to create; you can use these shares to create Persistent Volumes
#
number_of_nfs_shares: 5 # the image registry requires one NFS share; shares are named /srv/share0, /srv/share1, etc.
#
# LDAP integration
#
ldap_bind_user_dn: "cn=adreader,cn=Users,dc=am2,dc=cloudra,dc=local" # DN of the user you use to bind with the LDAP service
ldap_bind_user_password: "{{ vault.ldap_bind_user_password }}" # password for the user above; the real password is encrypted in the vault file
#ldap_ca_file: 'path to your LDAP CA certificate in pem format' # a default CA file is provided in playbooks/ldap/files/ca.pem; it will likely not work in your environment
#ldap_cr_file: 'path to your LDAP Custom Resource file' # a default CR file is provided in playbooks/ldap/vars/ldap_cr.yml; it will likely not work in your environment
#
# OpenShift-Ansible Playbook
#
ocp_repo_directory: "{{ local_home }}/openshift-ansible" # OpenShift-Ansible repository directory
#
# backup related settings
#
backup_directory: "{{ local_home }}/backups" # will be created if needed
backup_artifacts: # additional items to save in a backup
- "{{ install_dir }}"
- ./group_vars/
- ./hosts
#
# VMware CSI Storage plugin
#
#csi_datastore_name: "{{ datastores[0] }}" # name of the datastore which will hold the persistent volumes
#csi_storageclass_name: csivols # name of the storage class to create
#csi_datastore_size: 512 # size of the datastore in GiB
#
# Sysdig Integration
#
sysdig_access_key: "{{ vault.sysdig_access_key }}" # required. Your Sysdig access key should be encrypted in vault.yml
sysdig_k8s_cluster_name: "{{ cluster_name }}" # allows you to view, scope, and segment metrics in the Sysdig Monitor UI by Kubernetes cluster.
#sysdig_tags: 'key1:value,key2:value' # comma-separated list of key:value pairs
#sysdig_collector: collector.sysdigcloud.com # optional, leave unchanged for SaaS Sysdig
#sysdig_collector_port: 6666 # optional, leave unchanged for SaaS Sysdig
#sysdig_ssl: true # optional, leave unchanged for SaaS Sysdig
#sysdig_ssl_verify_certificate: true # optional, leave unchanged for SaaS Sysdig
#sysdig_new_k8s: true # optional, allows kube state metrics to be automatically detected, monitored, and displayed in Sysdig Monitor.
#
# SimpliVity
#
#simplivity_appliances: # define if you want to enable SimpliVity specific capabilities
#- 10.10.173.109
#- 10.10.173.110
#- 10.10.173.111
#
# Proxy Configuration
#
#http_proxy: "http://web-proxy.hpecloud.org:8080"
#https_proxy: "http://web-proxy.hpecloud.org:8080"
#no_proxy: "localhost,.{{ domain_name }},{{ dhcp_subnet }},{{ vcenter_hostname }}"
```
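The settings above reference their secrets through a single `vault` dictionary (for example `{{ vault.pull_secret }}` and `{{ vault.vcenter_password }}`), and the inline comments indicate those values live in an encrypted `vault.yml`. As a minimal sketch only (the exact path and every value below are placeholders, not taken from the sample), a matching vault file could look like this:

```yaml
---
# Sketch of group_vars/all/vault.yml; all values below are placeholders.
# Keep this file encrypted, for example with:
#   ansible-vault encrypt group_vars/all/vault.yml
vault:
  pull_secret: '<your OpenShift pull secret>'
  vcenter_password: '<vCenter administrator password>'
  ssh_key: 'ssh-rsa AAAA... user@example.com'
  rhn_orgid: '<RHN organization ID>'
  rhn_key: '<RHN activation key>'
  rhn_user: '<RHN username>'
  rhn_pass: '<RHN password>'
  ldap_bind_user_password: '<LDAP bind user password>'
  sysdig_access_key: '<Sysdig access key>'
```

Funneling every secret through the `vault` dictionary keeps `vars.yml` readable and reviewable, while `ansible-vault` protects only the one file that needs encryption.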