This repository has been archived by the owner on Nov 20, 2023. It is now read-only.

gcp - cns support #232

Open. Wants to merge 6 commits into base: master.
73 changes: 51 additions & 22 deletions docs/PROVISIONING_GCP.md
@@ -102,7 +102,7 @@ hosting_infrastructure: gcp
dns_domain: $dns_domain
env_id: <a unique identifier for that gcp project>

-casl_instances:
+cloud_infrastructure:
   region: <gcp region>
   masters:
     count: <# of masters>
@@ -146,26 +146,26 @@ For the host layout, see the example and replace the suffixes with your `env_id`.
| hosting_infrastructure | no | | must be set to `gcp` |
| dns_domain | no | | the domain you want to use for this cluster, must be `$dns_domain` |
| env_id | no | | a unique cluster identifier for this gcp project |
| casl_instances.region | no | | the gcp region in which to install the cluster |
| casl_instances.image_name | yes | `rhel7` | image to use for all the VMs |
| casl_instances.masters.count | no | | number of masters |
| casl_instances.masters.flavor | yes | `n1-standard-2` | type of VM |
| casl_instances.masters.zones | no | | array of availability zones in which the masters will be equally spread |
| casl_instances.masters.name_prefix | yes | `master` | prefix of the master VM names |
| casl_instances.masters.preemptible | yes | `false` | whether the masters should be preemptible; not recommended |
| casl_instances.masters.docker_volume_size | yes | `10` | size of the docker volume disk |
| casl_instances.appnodes.count | no | | number of appnodes |
| casl_instances.appnodes.flavor | yes | `n1-standard-2` | type of VM |
| casl_instances.appnodes.zones | no | | array of availability zones in which the appnodes will be equally spread |
| casl_instances.appnodes.name_prefix | yes | `node` | prefix of the appnode VM names |
| casl_instances.appnodes.preemptible | yes | `false` | whether the appnodes should be preemptible; not recommended |
| casl_instances.appnodes.docker_volume_size | yes | `50` | size of the docker volume disk |
| casl_instances.inranodes.count | no | | number of inranodes |
| casl_instances.inranodes.flavor | yes | `n1-standard-2` | type of VM |
| casl_instances.inranodes.zones | no | | array of availability zones in which the inranodes will be equally spread |
| casl_instances.inranodes.name_prefix | yes | `inranodes` | prefix of the inranode VM names |
| casl_instances.inranodes.preemptible | yes | `false` | whether the inranodes should be preemptible; not recommended |
| casl_instances.inranodes.docker_volume_size | yes | `20` | size of the docker volume disk |
| cloud_infrastructure.region | no | | the gcp region in which to install the cluster |
| cloud_infrastructure.image_name | yes | `rhel7` | image to use for all the VMs |
| cloud_infrastructure.masters.count | no | | number of masters |
| cloud_infrastructure.masters.flavor | yes | `n1-standard-2` | type of VM |
| cloud_infrastructure.masters.zones | no | | array of availability zones in which the masters will be equally spread |
| cloud_infrastructure.masters.name_prefix | yes | `master` | prefix of the master VM names |
| cloud_infrastructure.masters.preemptible | yes | `false` | whether the masters should be preemptible; not recommended |
| cloud_infrastructure.masters.docker_volume_size | yes | `10` | size of the docker volume disk |
| cloud_infrastructure.appnodes.count | no | | number of appnodes |
| cloud_infrastructure.appnodes.flavor | yes | `n1-standard-2` | type of VM |
| cloud_infrastructure.appnodes.zones | no | | array of availability zones in which the appnodes will be equally spread |
| cloud_infrastructure.appnodes.name_prefix | yes | `node` | prefix of the appnode VM names |
| cloud_infrastructure.appnodes.preemptible | yes | `false` | whether the appnodes should be preemptible; not recommended |
| cloud_infrastructure.appnodes.docker_volume_size | yes | `50` | size of the docker volume disk |
| cloud_infrastructure.inranodes.count | no | | number of inranodes |
| cloud_infrastructure.inranodes.flavor | yes | `n1-standard-2` | type of VM |
| cloud_infrastructure.inranodes.zones | no | | array of availability zones in which the inranodes will be equally spread |
| cloud_infrastructure.inranodes.name_prefix | yes | `inranodes` | prefix of the inranode VM names |
| cloud_infrastructure.inranodes.preemptible | yes | `false` | whether the inranodes should be preemptible; not recommended |
| cloud_infrastructure.inranodes.docker_volume_size | yes | `20` | size of the docker volume disk |
| service_account_email | no | | service account to be used when connecting to the Google API |
| credentials_file | no | | path to the credentials file, in JSON format, to be used for connections to the Google API |
| project_id | no | | gcp project id to use |
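To make the table concrete, here is a minimal sketch of a `cloud_infrastructure` block; the region, zones, and counts are illustrative values, not recommendations:

```
cloud_infrastructure:
  region: us-central1
  image_name: rhel7
  masters:
    count: 1
    flavor: n1-standard-2
    zones:
    - us-central1-a
    name_prefix: master
    preemptible: false
    docker_volume_size: 10
  appnodes:
    count: 2
    flavor: n1-standard-2
    zones:
    - us-central1-a
    - us-central1-b
    name_prefix: node
    preemptible: false
    docker_volume_size: 50
  inranodes:
    count: 1
    flavor: n1-standard-2
    zones:
    - us-central1-a
    name_prefix: inranodes
    preemptible: false
    docker_volume_size: 20
```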
@@ -186,7 +186,7 @@ Check out the version of casl that you want to use and run the galaxy command:
```
git clone https://github.com/redhat-cop/casl-ansible
cd casl-ansible
-ansible-galaxy install -r casl-requirements.yml -p roles
+ansible-galaxy install -r casl-requirements.yml -p galaxy
```
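With `-p galaxy`, the role dependencies land in a `galaxy/` directory inside the checkout, which is presumably the roles path the repository's playbooks are configured to search.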
At this point you can run the playbook this way:
```
...
```

@@ -206,6 +206,35 @@ If you need to execute this operation separately, you can run the following command:
```
ansible-playbook -i <inventory_dir> --private-key=<private key for $gcp_user> <casl_ansible_dir>/playbooks/openshift/gcp/configure-registry.yml
```

## CNS support
Support for automating the deployment of CNS (Container-Native Storage) is available.
You need to add an additional section to the `cloud_infrastructure` dictionary to define the CNS nodes.
Here is an example of this section:

```
cnsnodes:
  count: 3
  flavor: n1-standard-8
  zones:
  - us-central1-a
  - us-central1-b
  - us-central1-f
  name_prefix: cnsnode
  preemptible: false
  docker_volume_size: 20
  cns_volume_size: 100
  cns_disk_type: pd-standard
```

You also need to add the following variable:
```
cns_node_glusterfs_volume: /dev/disk/by-id/google-cns-disk1
```
Also, make sure that the glusterfs group vars contain the following:
```
glusterfs_devices:
- '/dev/disk/by-id/google-cns-disk1'
```
Finally, configure your GlusterFS settings as you normally would.
Please refer to the [cns example](../inventory/sample.gcp-cns.example.com.d/inventory) for more details.
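For orientation, here is a minimal sketch of how CNS nodes typically appear in an openshift-ansible inventory; the host names are hypothetical, and the linked example shows the actual layout:

```
[OSEv3:children]
masters
nodes
glusterfs

[glusterfs]
cnsnode1.example.com
cnsnode2.example.com
cnsnode3.example.com
```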

# Cleaning up
In order to clean up, run this playbook:
27 changes: 27 additions & 0 deletions inventory/sample.byo.example.com.d/inventory/group_vars/OSEv3.yml
@@ -51,3 +51,30 @@ openshift_node_open_ports:
port: "1936/tcp"
- service: "prometheus node exporter"
port: "9100/tcp"

openshift_node_groups:
- name: node-config-master
  labels:
  - 'node-role.kubernetes.io/master=true'
  edits:
  - key: kubeletArguments.kube-reserved
    value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 50 }}M' ]
  - key: kubeletArguments.system-reserved
    value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 100 }}M' ]
- name: node-config-infra
  labels:
  - 'node-role.kubernetes.io/infra=true'
  edits:
  - key: kubeletArguments.kube-reserved
    value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 50 }}M' ]
  - key: kubeletArguments.system-reserved
    value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 100 }}M' ]
- name: node-config-compute
  labels:
  - 'node-role.kubernetes.io/compute=true'
  edits:
  - key: kubeletArguments.kube-reserved
    value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 50 }}M' ]
  - key: kubeletArguments.system-reserved
    value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 100 }}M' ]
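# Worked example (illustrative): on a 4-vCPU node, the expressions above
# evaluate to kube-reserved cpu=200m, memory=200M and system-reserved
# cpu=200m, memory=400M for each node group.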

5 changes: 5 additions & 0 deletions inventory/sample.gcp-cns.example.com.d/inventory/clouds.yml
@@ -0,0 +1,5 @@
ansible:
use_hostnames: True
expand_hostvars: True
fail_on_errors: True

69 changes: 69 additions & 0 deletions inventory/sample.gcp-cns.example.com.d/inventory/gce.ini
@@ -0,0 +1,69 @@
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# The GCE inventory script has the following dependencies:
# 1. A valid Google Cloud Platform account with Google Compute Engine
# enabled. See https://cloud.google.com
# 2. An OAuth2 Service Account flow should be enabled. This will generate
# a private key file that the inventory script will use for API request
# authorization. See https://developers.google.com/accounts/docs/OAuth2
# 3. Convert the private key from PKCS12 to PEM format
# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
# > -nodes -nocerts | openssl rsa -out pkey.pem
# 4. The libcloud (>=0.13.3) python library. See http://libcloud.apache.org
#
# (See ansible/test/gce_tests.py comments for full install instructions)
#
# Author: Eric Johnson <erjohnso@google.com>

[gce]
# GCE Service Account configuration information can be stored in the
# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already
# exist in your PYTHONPATH and be picked up automatically with an import
# statement in the inventory script. However, you can specify an absolute
# path to the secrets.py file with 'libcloud_secrets' parameter.
# This option will be deprecated in a future release.
libcloud_secrets =

# If you are not going to use a 'secrets.py' file, you can set the necessary
# authorization parameters here.
gce_service_account_email_address = <REPLACE WITH A VALID SERVICE ACCOUNT E-MAIL ADDRESS>
gce_service_account_pem_file_path = <REPLACE WITH A VALID SERVICE ACCOUNT PEM/JSON FILE>
gce_project_id = <REPLACE WITH A VALID PROJECT ID>
gce_zone =

# Filter inventory based on state. Leave undefined to return instances regardless of state.
# example: Uncomment to only return inventory in the running or provisioning state
#instance_states = RUNNING,PROVISIONING


[inventory]
# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should
# contain the instance internal or external address. Values may be either
# 'internal' or 'external'. If 'external' is specified but no external instance
# address exists, the internal address will be used.
# The INVENTORY_IP_TYPE environment variable will override this value.
inventory_ip_type =

[cache]
# directory in which cache should be created
cache_path = ~/.ansible/tmp

# The number of seconds a cache file is considered valid. After this many
# seconds, a new API call will be made, and the cache file will be updated.
# To disable the cache, set this value to 0
cache_max_age = 0
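# Usage sketch (assuming the stock gce.py dynamic inventory script from
# Ansible's contrib/inventory directory sits next to this file):
#   GCE_INI_PATH=./gce.ini ./gce.py --list
#   GCE_INI_PATH=./gce.ini ./gce.py --host <instance-name>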