diff --git a/docs/PROVISIONING_GCP.md b/docs/PROVISIONING_GCP.md index ac916136..649ae715 100644 --- a/docs/PROVISIONING_GCP.md +++ b/docs/PROVISIONING_GCP.md @@ -102,7 +102,7 @@ hosting_infrastructure: gcp dns_domain: $dns_domain env_id: -casl_instances: +cloud_infrastructure: region: masters: count: <# of masters> @@ -146,26 +146,26 @@ For the host layout see the example and replace the suffixes with your `env_id`. | hosting_infrastructure | no | | must be set to `gcp` | | dns_domain | no | | the domain you want to use for this cluster, must be `$dns_domain` | | env_id | no | | a unique cluster identifier for this gcp project | -| casl_instances.region | no | | the gcp region in which to install the cluster | -| casl_instances.image_name | yes | `rhel7` | image to use for all the VMs | -| casl_instances.masters.count | no | | number of masters | -| casl_instances.masters.flavor | yes | `n1-standard-2` | type of VM | -| casl_instances.masters.zones | no | | array of availability zones in which the masters will be equally spread | -| casl_instances.masters.name_prefix | yes | `master` | prefix of the master VM names | -| casl_instances.masters.preemptible | yes | `false` | whether should be preemptible, not recommeded. | -| casl_instances.masters.docker_volume_size | yes | `10` | size of the docker volume disk | -| casl_instances.appnodes.count | no | | number of appnodes | -| casl_instances.appnodes.flavor | yes | `n1-standard-2` | type of VM | -| casl_instances.appnodes.zones | no | | array of availability zones in which the appnodes will be equally spread | -| casl_instances.appnodes.name_prefix | yes | `node` | prefix of the appnode VM names | -| casl_instances.appnodes.preemptible | yes | `false` | whether should be preemptible, not recommeded. 
| -| casl_instances.appnodes.docker_volume_size | yes | `50` | size of the docker volume disk | -| casl_instances.inranodes.count | no | | number of inranodes | -| casl_instances.inranodes.flavor | yes | `n1-standard-2` | type of VM | -| casl_instances.inranodes.zones | no | | array of availability zones in which the inranodes will be equally spread | -| casl_instances.inranodes.name_prefix | yes | `inranodes` | prefix of the inranode VM names | -| casl_instances.inranodes.preemptible | yes | `false` | whether should be preemptible, not recommeded. | -| casl_instances.inranodes.docker_volume_size | yes | `20` | size of the docker volume disk | +| cloud_infrastructure.region | no | | the gcp region in which to install the cluster | +| cloud_infrastructure.image_name | yes | `rhel7` | image to use for all the VMs | +| cloud_infrastructure.masters.count | no | | number of masters | +| cloud_infrastructure.masters.flavor | yes | `n1-standard-2` | type of VM | +| cloud_infrastructure.masters.zones | no | | array of availability zones in which the masters will be equally spread | +| cloud_infrastructure.masters.name_prefix | yes | `master` | prefix of the master VM names | +| cloud_infrastructure.masters.preemptible | yes | `false` | whether should be preemptible, not recommeded. | +| cloud_infrastructure.masters.docker_volume_size | yes | `10` | size of the docker volume disk | +| cloud_infrastructure.appnodes.count | no | | number of appnodes | +| cloud_infrastructure.appnodes.flavor | yes | `n1-standard-2` | type of VM | +| cloud_infrastructure.appnodes.zones | no | | array of availability zones in which the appnodes will be equally spread | +| cloud_infrastructure.appnodes.name_prefix | yes | `node` | prefix of the appnode VM names | +| cloud_infrastructure.appnodes.preemptible | yes | `false` | whether should be preemptible, not recommeded. 
| +| cloud_infrastructure.appnodes.docker_volume_size | yes | `50` | size of the docker volume disk | +| cloud_infrastructure.inranodes.count | no | | number of inranodes | +| cloud_infrastructure.inranodes.flavor | yes | `n1-standard-2` | type of VM | +| cloud_infrastructure.inranodes.zones | no | | array of availability zones in which the inranodes will be equally spread | +| cloud_infrastructure.inranodes.name_prefix | yes | `inranodes` | prefix of the inranode VM names | +| cloud_infrastructure.inranodes.preemptible | yes | `false` | whether should be preemptible, not recommeded. | +| cloud_infrastructure.inranodes.docker_volume_size | yes | `20` | size of the docker volume disk | | service_account_email | no | | service account to be used when connecting to the Google API | | credentials_file | no | | path to the credential file in json format to be used for the connections to the Google | | project_id | no | `20` | gcp project id to use | @@ -186,7 +186,7 @@ Checkout the version of casl that you want to use and run the galaxy command: ``` git clone https://github.com/redhat-cop/casl-ansible cd casl-ansible -ansible-galaxy install -r casl-requirements.yml -p roles +ansible-galaxy install -r casl-requirements.yml -p galaxy ``` at this point you can run the playbook this way: ``` @@ -206,6 +206,35 @@ If you need to execute this operation separately, you can run the following comm ansible-playbook -i --private-key= /playbooks/openshift/gcp/configure-registry.yml ``` +## CNS support +Support to automate the deployment of CNS is available. +you need to add an additional section to the `cloud_infrastructure` dictionary to define the cns nodes. +Here is an example of this section. 
+ +``` + cnsnodes: + count: 3 + flavor: n1-standard-8 + zones: + - us-central1-a + - us-central1-b + - us-central1-f + name_prefix: cnsnode + preemptible: false + docker_volume_size: 20 + cns_volume_size: 100 + cns_disk_type: pd-standard +``` + +you also need to add the following variable: +```cns_node_glusterfs_volume: /dev/disk/by-id/google-cns-disk1``` +Also, make sure that the glusterfs group vars contains the following: +``` +glusterfs_devices: +- '/dev/disk/by-id/google-cns-disk1' +``` +Finally you need to configure your glustefs settings as you would normally do. +Please refer to the [cns example](../inventory/sample.gcp-cns.example.com.d/inventory) for more details. # Cleaning up In order to clean up run this plyabook diff --git a/inventory/sample.byo.example.com.d/inventory/group_vars/OSEv3.yml b/inventory/sample.byo.example.com.d/inventory/group_vars/OSEv3.yml index 0a7e8642..c54470f6 100644 --- a/inventory/sample.byo.example.com.d/inventory/group_vars/OSEv3.yml +++ b/inventory/sample.byo.example.com.d/inventory/group_vars/OSEv3.yml @@ -51,3 +51,30 @@ openshift_node_open_ports: port: "1936/tcp" - service: "prometheus node exporter" port: "9100/tcp" + +openshift_node_groups: + - name: node-config-master + labels: + - 'node-role.kubernetes.io/master=true' + edits: + - key: kubeletArguments.kube-reserved + value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 50 }}M' ] + - key: kubeletArguments.system-reserved + value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 100 }}M' ] + - name: node-config-infra + labels: + - 'node-role.kubernetes.io/infra=true' + edits: + - key: kubeletArguments.kube-reserved + value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 50 }}M' ] + - key: kubeletArguments.system-reserved + value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 100 }}M' ] + - name: node-config-compute + labels: + - 
'node-role.kubernetes.io/compute=true' + edits: + - key: kubeletArguments.kube-reserved + value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 50 }}M' ] + - key: kubeletArguments.system-reserved + value: [ 'cpu={{ ansible_processor_vcpus * 50 }}m', 'memory={{ ansible_processor_vcpus * 100 }}M' ] + diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/clouds.yml b/inventory/sample.gcp-cns.example.com.d/inventory/clouds.yml new file mode 100644 index 00000000..c266426c --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/clouds.yml @@ -0,0 +1,5 @@ +ansible: + use_hostnames: True + expand_hostvars: True + fail_on_errors: True + diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/gce.ini b/inventory/sample.gcp-cns.example.com.d/inventory/gce.ini new file mode 100644 index 00000000..091ec247 --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/gce.ini @@ -0,0 +1,69 @@ +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# The GCE inventory script has the following dependencies: +# 1. A valid Google Cloud Platform account with Google Compute Engine +# enabled. See https://cloud.google.com +# 2. An OAuth2 Service Account flow should be enabled. This will generate +# a private key file that the inventory script will use for API request +# authorization. 
See https://developers.google.com/accounts/docs/OAuth2 +# 3. Convert the private key from PKCS12 to PEM format +# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \ +# > -nodes -nocerts | openssl rsa -out pkey.pem +# 4. The libcloud (>=0.13.3) python libray. See http://libcloud.apache.org +# +# (See ansible/test/gce_tests.py comments for full install instructions) +# +# Author: Eric Johnson + +[gce] +# GCE Service Account configuration information can be stored in the +# libcloud 'secrets.py' file. Ideally, the 'secrets.py' file will already +# exist in your PYTHONPATH and be picked up automatically with an import +# statement in the inventory script. However, you can specify an absolute +# path to the secrets.py file with 'libcloud_secrets' parameter. +# This option will be deprecated in a future release. +libcloud_secrets = + +# If you are not going to use a 'secrets.py' file, you can set the necessary +# authorization parameters here. +gce_service_account_email_address = +gce_service_account_pem_file_path = +gce_project_id = +gce_zone = + +# Filter inventory based on on state. Leave undefined to return instances regardless of state. +# example: Uncomment to only return inventory in the running or provisioning state +#instance_states = RUNNING,PROVISIONING + + +[inventory] +# The 'inventory_ip_type' parameter specifies whether 'ansible_ssh_host' should +# contain the instance internal or external address. Values may be either +# 'internal' or 'external'. If 'external' is specified but no external instance +# address exists, the internal address will be used. +# The INVENTORY_IP_TYPE environment variable will override this value. +inventory_ip_type = + +[cache] +# directory in which cache should be created +cache_path = ~/.ansible/tmp + +# The number of seconds a cache file is considered valid. After this many +# seconds, a new API call will be made, and the cache file will be updated. 
+# To disable the cache, set this value to 0 +cache_max_age = 0 diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/gce.py b/inventory/sample.gcp-cns.example.com.d/inventory/gce.py new file mode 100755 index 00000000..9a0cef0b --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/gce.py @@ -0,0 +1,508 @@ +#!/usr/bin/env python +# Copyright 2013 Google Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +''' +GCE external inventory script +================================= + +Generates inventory that Ansible can understand by making API requests +Google Compute Engine via the libcloud library. Full install/configuration +instructions for the gce* modules can be found in the comments of +ansible/test/gce_tests.py. + +When run against a specific host, this script returns the following variables +based on the data obtained from the libcloud Node object: + - gce_uuid + - gce_id + - gce_image + - gce_machine_type + - gce_private_ip + - gce_public_ip + - gce_name + - gce_description + - gce_status + - gce_zone + - gce_tags + - gce_metadata + - gce_network + - gce_subnetwork + +When run in --list mode, instances are grouped by the following categories: + - zone: + zone group name examples are us-central1-b, europe-west1-a, etc. + - instance tags: + An entry is created for each tag. 
For example, if you have two instances + with a common tag called 'foo', they will both be grouped together under + the 'tag_foo' name. + - network name: + the name of the network is appended to 'network_' (e.g. the 'default' + network will result in a group named 'network_default') + - machine type + types follow a pattern like n1-standard-4, g1-small, etc. + - running status: + group name prefixed with 'status_' (e.g. status_running, status_stopped,..) + - image: + when using an ephemeral/scratch disk, this will be set to the image name + used when creating the instance (e.g. debian-7-wheezy-v20130816). when + your instance was created with a root persistent disk it will be set to + 'persistent_disk' since there is no current way to determine the image. + +Examples: + Execute uname on all instances in the us-central1-a zone + $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" + + Use the GCE inventory script to print out instance specific information + $ contrib/inventory/gce.py --host my_instance + +Author: Eric Johnson +Contributors: Matt Hite , Tom Melendez +Version: 0.0.3 +''' + +try: + import pkg_resources +except ImportError: + # Use pkg_resources to find the correct versions of libraries and set + # sys.path appropriately when there are multiversion installs. We don't + # fail here as there is code that better expresses the errors where the + # library is used. 
+ pass + +USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin" +USER_AGENT_VERSION = "v2" + +import sys +import os +import argparse + +from time import time + +if sys.version_info >= (3, 0): + import configparser +else: + import ConfigParser as configparser + +import logging +logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) + +try: + import json +except ImportError: + import simplejson as json + +try: + from libcloud.compute.types import Provider + from libcloud.compute.providers import get_driver + _ = Provider.GCE +except: + sys.exit("GCE inventory script requires libcloud >= 0.13") + + +class CloudInventoryCache(object): + def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp', + cache_max_age=300): + cache_dir = os.path.expanduser(cache_path) + if not os.path.exists(cache_dir): + os.makedirs(cache_dir) + self.cache_path_cache = os.path.join(cache_dir, cache_name) + + self.cache_max_age = cache_max_age + + def is_valid(self, max_age=None): + ''' Determines if the cache files have expired, or if it is still valid ''' + + if max_age is None: + max_age = self.cache_max_age + + if os.path.isfile(self.cache_path_cache): + mod_time = os.path.getmtime(self.cache_path_cache) + current_time = time() + if (mod_time + max_age) > current_time: + return True + + return False + + def get_all_data_from_cache(self, filename=''): + ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' + + data = '' + if not filename: + filename = self.cache_path_cache + with open(filename, 'r') as cache: + data = cache.read() + return json.loads(data) + + def write_to_cache(self, data, filename=''): + ''' Writes data to file as JSON. Returns True. 
''' + if not filename: + filename = self.cache_path_cache + json_data = json.dumps(data) + with open(filename, 'w') as cache: + cache.write(json_data) + return True + + +class GceInventory(object): + def __init__(self): + # Cache object + self.cache = None + # dictionary containing inventory read from disk + self.inventory = {} + + # Read settings and parse CLI arguments + self.parse_cli_args() + self.config = self.get_config() + self.driver = self.get_gce_driver() + self.ip_type = self.get_inventory_options() + if self.ip_type: + self.ip_type = self.ip_type.lower() + + # Cache management + start_inventory_time = time() + cache_used = False + if self.args.refresh_cache or not self.cache.is_valid(): + self.do_api_calls_update_cache() + else: + self.load_inventory_from_cache() + cache_used = True + self.inventory['_meta']['stats'] = {'use_cache': True} + self.inventory['_meta']['stats'] = { + 'inventory_load_time': time() - start_inventory_time, + 'cache_used': cache_used + } + + # Just display data for specific host + if self.args.host: + print(self.json_format_dict( + self.inventory['_meta']['hostvars'][self.args.host], + pretty=self.args.pretty)) + else: + # Otherwise, assume user wants all instances grouped + zones = self.parse_env_zones() + print(self.json_format_dict(self.inventory, + pretty=self.args.pretty)) + sys.exit(0) + + def get_config(self): + """ + Reads the settings from the gce.ini file. + + Populates a SafeConfigParser object with defaults and + attempts to read an .ini-style configuration from the filename + specified in GCE_INI_PATH. If the environment variable is + not present, the filename defaults to gce.ini in the current + working directory. + """ + gce_ini_default_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "gce.ini") + gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) + + # Create a ConfigParser. 
+ # This provides empty defaults to each key, so that environment + # variable configuration (as opposed to INI configuration) is able + # to work. + config = configparser.SafeConfigParser(defaults={ + 'gce_service_account_email_address': '', + 'gce_service_account_pem_file_path': '', + 'gce_project_id': '', + 'gce_zone': '', + 'libcloud_secrets': '', + 'inventory_ip_type': '', + 'cache_path': '~/.ansible/tmp', + 'cache_max_age': '300' + }) + if 'gce' not in config.sections(): + config.add_section('gce') + if 'inventory' not in config.sections(): + config.add_section('inventory') + if 'cache' not in config.sections(): + config.add_section('cache') + + config.read(gce_ini_path) + + ######### + # Section added for processing ini settings + ######### + + # Set the instance_states filter based on config file options + self.instance_states = [] + if config.has_option('gce', 'instance_states'): + states = config.get('gce', 'instance_states') + # Ignore if instance_states is an empty string. + if states: + self.instance_states = states.split(',') + + # Caching + cache_path = config.get('cache', 'cache_path') + cache_max_age = config.getint('cache', 'cache_max_age') + # TOOD(supertom): support project-specific caches + cache_name = 'ansible-gce.cache' + self.cache = CloudInventoryCache(cache_path=cache_path, + cache_max_age=cache_max_age, + cache_name=cache_name) + return config + + def get_inventory_options(self): + """Determine inventory options. Environment variables always + take precedence over configuration files.""" + ip_type = self.config.get('inventory', 'inventory_ip_type') + # If the appropriate environment variables are set, they override + # other configuration + ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) + return ip_type + + def get_gce_driver(self): + """Determine the GCE authorization settings and return a + libcloud driver. + """ + # Attempt to get GCE params from a configuration file, if one + # exists. 
+ secrets_path = self.config.get('gce', 'libcloud_secrets') + secrets_found = False + + try: + import secrets + args = list(secrets.GCE_PARAMS) + kwargs = secrets.GCE_KEYWORD_PARAMS + secrets_found = True + except: + pass + + if not secrets_found and secrets_path: + if not secrets_path.endswith('secrets.py'): + err = "Must specify libcloud secrets file as " + err += "/absolute/path/to/secrets.py" + sys.exit(err) + sys.path.append(os.path.dirname(secrets_path)) + try: + import secrets + args = list(getattr(secrets, 'GCE_PARAMS', [])) + kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) + secrets_found = True + except: + pass + + if not secrets_found: + args = [ + self.config.get('gce', 'gce_service_account_email_address'), + self.config.get('gce', 'gce_service_account_pem_file_path') + ] + kwargs = {'project': self.config.get('gce', 'gce_project_id'), + 'datacenter': self.config.get('gce', 'gce_zone')} + + # If the appropriate environment variables are set, they override + # other configuration; process those into our args and kwargs. + args[0] = os.environ.get('GCE_EMAIL', args[0]) + args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) + args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1]) + + kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) + kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter']) + + # Retrieve and return the GCE driver. + gce = get_driver(Provider.GCE)(*args, **kwargs) + gce.connection.user_agent_append( + '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), + ) + return gce + + def parse_env_zones(self): + '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable. 
+ If provided, this will be used to filter the results of the grouped_instances call''' + import csv + reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True) + zones = [r for r in reader] + return [z for z in zones[0]] + + def parse_cli_args(self): + ''' Command line argument processing ''' + + parser = argparse.ArgumentParser( + description='Produce an Ansible Inventory file based on GCE') + parser.add_argument('--list', action='store_true', default=True, + help='List instances (default: True)') + parser.add_argument('--host', action='store', + help='Get all information about an instance') + parser.add_argument('--pretty', action='store_true', default=False, + help='Pretty format (default: False)') + parser.add_argument( + '--refresh-cache', action='store_true', default=False, + help='Force refresh of cache by making API requests (default: False - use cache files)') + self.args = parser.parse_args() + + def node_to_dict(self, inst): + md = {} + + if inst is None: + return {} + + if 'items' in inst.extra['metadata']: + for entry in inst.extra['metadata']['items']: + md[entry['key']] = entry['value'] + + net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] + subnet = None + if 'subnetwork' in inst.extra['networkInterfaces'][0]: + subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1] + # default to exernal IP unless user has specified they prefer internal + if self.ip_type == 'internal': + ssh_host = inst.private_ips[0] + else: + ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] + + return { + 'gce_uuid': inst.uuid, + 'gce_id': inst.id, + 'gce_image': inst.image, + 'gce_machine_type': inst.size, + 'gce_private_ip': inst.private_ips[0], + 'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None, + 'gce_name': inst.name, + 'gce_description': inst.extra['description'], + 'gce_status': inst.extra['status'], + 'gce_zone': inst.extra['zone'].name, + 'gce_tags': 
inst.extra['tags'], + 'gce_metadata': md, + 'gce_network': net, + 'gce_subnetwork': subnet, + # Hosts don't have a public name, so we add an IP + 'ansible_ssh_host': ssh_host + } + + def load_inventory_from_cache(self): + ''' Loads inventory from JSON on disk. ''' + + try: + self.inventory = self.cache.get_all_data_from_cache() + hosts = self.inventory['_meta']['hostvars'] + except Exception as e: + print( + "Invalid inventory file %s. Please rebuild with -refresh-cache option." + % (self.cache.cache_path_cache)) + raise + + def do_api_calls_update_cache(self): + ''' Do API calls and save data in cache. ''' + zones = self.parse_env_zones() + data = self.group_instances(zones) + self.cache.write_to_cache(data) + self.inventory = data + + def list_nodes(self): + all_nodes = [] + params, more_results = {'maxResults': 500}, True + while more_results: + self.driver.connection.gce_params = params + all_nodes.extend(self.driver.list_nodes()) + more_results = 'pageToken' in params + return all_nodes + + def group_instances(self, zones=None): + '''Group all instances''' + groups = {} + meta = {} + meta["hostvars"] = {} + + for node in self.list_nodes(): + + # This check filters on the desired instance states defined in the + # config file with the instance_states config option. + # + # If the instance_states list is _empty_ then _ALL_ states are returned. 
+ # + # If the instance_states list is _populated_ then check the current + # state against the instance_states list + if self.instance_states and not node.extra['status'] in self.instance_states: + continue + + name = node.name + + meta["hostvars"][name] = self.node_to_dict(node) + + zone = node.extra['zone'].name + + # To avoid making multiple requests per zone + # we list all nodes and then filter the results + if zones and zone not in zones: + continue + + if zone in groups: + groups[zone].append(name) + else: + groups[zone] = [name] + + tags = node.extra['tags'] + for t in tags: + if t.startswith('group-'): + tag = t[6:] + else: + tag = 'tag_%s' % t + if tag in groups: + groups[tag].append(name) + else: + groups[tag] = [name] + + net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] + net = 'network_%s' % net + if net in groups: + groups[net].append(name) + else: + groups[net] = [name] + + machine_type = node.size + if machine_type in groups: + groups[machine_type].append(name) + else: + groups[machine_type] = [name] + + image = node.image and node.image or 'persistent_disk' + if image in groups: + groups[image].append(name) + else: + groups[image] = [name] + + status = node.extra['status'] + stat = 'status_%s' % status.lower() + if stat in groups: + groups[stat].append(name) + else: + groups[stat] = [name] + + for private_ip in node.private_ips: + groups[private_ip] = [name] + + if len(node.public_ips) >= 1: + for public_ip in node.public_ips: + groups[public_ip] = [name] + + groups["_meta"] = meta + + return groups + + def json_format_dict(self, data, pretty=False): + ''' Converts a dict to a JSON object and dumps it as a formatted + string ''' + + if pretty: + return json.dumps(data, sort_keys=True, indent=2) + else: + return json.dumps(data) + +# Run the script +if __name__ == '__main__': + GceInventory() diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/OSEv3.yml 
b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/OSEv3.yml new file mode 100644 index 00000000..84232bba --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/OSEv3.yml @@ -0,0 +1,65 @@ +--- + +deployment_type: openshift-enterprise +openshift_deployment_type: openshift-enterprise +openshift_master_cluster_method: native +openshift_release: v3.9 + + +# HTPASSWD Identity Provider +openshift_master_identity_providers: + - 'name': 'htpasswd_auth' + 'login': 'true' + 'challenge': 'true' + 'kind': 'HTPasswdPasswordIdentityProvider' + 'filename': '/etc/origin/master/htpasswd' + +#this will create an admin/admin user +openshift_master_htpasswd_users: + admin: $apr1$7aiANAYb$TOUYVUqnBqBlD5AQEIMYw1 + +openshift_hosted_router_selector: 'region=infra' +openshift_hosted_manage_router: true + +openshift_hosted_registry_selector: 'region=infra' +openshift_hosted_manage_registry: true + + +osm_default_node_selector: 'region=primary' + + +openshift_docker_options: "--log-driver=json-file --log-opt max-size=50m --log-opt max-file=100" + +openshift_cloudprovider_kind: gce + +os_sdn_network_plugin_name: 'redhat/openshift-ovs-networkpolicy' +#os_sdn_network_plugin_name: 'redhat/openshift-ovs-multitenant' +os_firewall_use_firewalld: true +osm_cluster_network_cidr: 10.1.0.0/16 + +openshift_enable_service_catalog: false +openshift_hosted_prometheus_deploy: false +openshift_cfme_install_app: false + +openshift_storage_glusterfs_namespace: glusterfs +openshift_storage_glusterfs_name: storage +openshift_storage_glusterfs_nodeselector: "region=cns" +openshift_storage_glusterfs_storageclass: true +openshift_storage_glusterfs_storageclass_default: true +openshift_storage_glusterfs_block_deploy: true +openshift_storage_glusterfs_block_host_vol_create: true +openshift_storage_glusterfs_block_host_vol_size: 100 +openshift_storage_glusterfs_block_host_vol_max: 10 +openshift_storage_glusterfs_block_storageclass: true 
+openshift_storage_glusterfs_block_storageclass_default: false +openshift_storage_glusterfs_s3_deploy: true +openshift_storage_glusterfs_heketi_admin_key: admin +openshift_storage_glusterfs_heketi_user_key: user +openshift_storage_glusterfs_wipe: true + + + + + + + diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/all.yml b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/all.yml new file mode 100644 index 00000000..10d792fa --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/all.yml @@ -0,0 +1,116 @@ +--- + +# 'hosting_infrastructure' is used to drive the correct behavior based +# on the hosting infrastructure, cloud provider, etc. Valid values are: +# - 'openstack' +# - 'aws' +# - 'azure' (Coming Soon) +# - 'gcp' +hosting_infrastructure: gcp + +# Cluster Environment ID to uniquely identify the environment +env_id: env1 + +ansible_user: +ansible_become: true + +dns_domain: "" + +cloud_infrastructure: + region: us-central1 + image_name: rhel7 + masters: + count: 3 + flavor: n1-standard-8 + zones: + - us-central1-a + - us-central1-b + - us-central1-f + name_prefix: master + preemptible: false + docker_volume_size: 12 + appnodes: + count: 3 + flavor: n1-standard-4 + zones: + - us-central1-a + - us-central1-b + - us-central1-f + name_prefix: node + preemptible: false + docker_volume_size: 50 + infranodes: + count: 3 + flavor: n1-standard-4 + zones: + - us-central1-a + - us-central1-b + - us-central1-f + name_prefix: infranode + preemptible: false + docker_volume_size: 20 + cnsnodes: + count: 3 + flavor: n1-standard-8 + zones: + - us-central1-a + - us-central1-b + - us-central1-f + name_prefix: cnsnode + preemptible: false + docker_volume_size: 20 + cns_volume_size: 100 + cns_disk_type: pd-standard + +# docker_storage_block_device: "/dev/vdb" +cns_node_glusterfs_volume: /dev/disk/by-id/google-cns-disk1 + +# Subscription Management Details +rhsm_register: True +rhsm_repos: + - "rhel-7-server-rpms" + - 
"rhel-7-server-ose-3.9-rpms" + - "rhel-7-server-extras-rpms" + - "rhel-7-fast-datapath-rpms" + + +# Use RHSM username, password and optionally pool: +# NOTE: use the -e option to specify on CLI instead of statically set here +rhsm_username: '' +rhsm_password: '' + +# leave commented out if you want to `--auto-attach` a pool +#rhsm_pool: '' + +# Specify the version of docker to use +#docker_version: "1.12.*" + +#google specific properties +service_account_email: +credentials_file: +project_id: + +# google object storage bucket name for the registry +# suggested value: openshift-registry-{{ env_id }}-{{ project_id }} +google_registry_bucket_name: openshift-registry-{{ env_id }}-{{ project_id }} + +# openshift_master_cluster_public_hostname must be something like: +# .{{ env_id }}.{{ dns_domain }} +openshift_master_cluster_public_hostname: master.{{ env_id }}.{{ dns_domain }} + +# openshift_master_cluster_hostname must be something like: +# .{{ env_id }}.{{ dns_domain }} +openshift_master_cluster_hostname: master-internal.{{ env_id }}.{{ dns_domain }} + +# openshift_master_default_subdomain must be something like: +# .{{ env_id }}.{{ dns_domain }} +openshift_master_default_subdomain: apps.{{ env_id }}.{{ dns_domain }} + +#a unique prefix (unsure for what is used) +openshift_gcp_prefix: "{{ env_id }}" + +#your openshift goocle project +openshift_gcp_project: "{{ project_id }}" + +#whether your deployment is multizone. 
+openshift_gcp_multizone: true diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/glusterfs.yml b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/glusterfs.yml new file mode 100644 index 00000000..84138422 --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/glusterfs.yml @@ -0,0 +1,8 @@ +--- + +openshift_node_labels: + region: cns + node-role.kubernetes.io/cnsnode: true + +glusterfs_devices: +- '/dev/disk/by-id/google-cns-disk1' \ No newline at end of file diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/infra_hosts.yml b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/infra_hosts.yml new file mode 100644 index 00000000..30a253cb --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/infra_hosts.yml @@ -0,0 +1,9 @@ +--- + +openshift_node_labels: + region: infra + node-role.kubernetes.io/infranode: true + +openshift_node_open_ports: + - service: "router stats port" + port: "1936/tcp" diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/masters.yml b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/masters.yml new file mode 100644 index 00000000..17862642 --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/masters.yml @@ -0,0 +1,5 @@ +--- + +openshift_node_labels: + region: master + diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/nodes.yaml b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/nodes.yaml new file mode 100644 index 00000000..24976e09 --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/group_vars/nodes.yaml @@ -0,0 +1,28 @@ +--- + +openshift_node_labels: + region: primary + +openshift_node_kubelet_args: + kube-reserved: + - cpu={{ ansible_processor_vcpus * 50 }}m + - memory={{ ansible_processor_vcpus * 50 }}M + system-reserved: + - cpu={{ ansible_processor_vcpus * 50 }}m + - memory={{ ansible_processor_vcpus * 100 }}M + 
eviction-hard: + - memory.available<256Mi + minimum-container-ttl-duration: + - "10s" + maximum-dead-containers-per-container: + - "2" + maximum-dead-containers: + - "50" + pods-per-core: + - '10' + max-pods: + - '250' + image-gc-high-threshold: + - '80' + image-gc-low-threshold: + - '60' diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/host_vars/localhost b/inventory/sample.gcp-cns.example.com.d/inventory/host_vars/localhost new file mode 100644 index 00000000..9aaa69f8 --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/host_vars/localhost @@ -0,0 +1,4 @@ +--- + +ansible_connection: local +ansible_become: false diff --git a/inventory/sample.gcp-cns.example.com.d/inventory/hosts b/inventory/sample.gcp-cns.example.com.d/inventory/hosts new file mode 100644 index 00000000..86bf1452 --- /dev/null +++ b/inventory/sample.gcp-cns.example.com.d/inventory/hosts @@ -0,0 +1,31 @@ +[cluster_hosts:children] +OSEv3 + +[OSEv3:children] +masters +nodes +etcd +glusterfs + +[masters:children] +tag_env1-master + +[etcd:children] +tag_env1-master + +[nodes:children] +masters +tag_env1-appnode +infra_hosts +glusterfs + +[infra_hosts:children] +tag_env1-infranode + +[glusterfs:children] +tag_env1-cnsnode + +[tag_env1-master] +[tag_env1-appnode] +[tag_env1-infranode] +[tag_env1-cnsnode] diff --git a/inventory/sample.gcp.example.com.d/inventory/group_vars/all.yml b/inventory/sample.gcp.example.com.d/inventory/group_vars/all.yml index 7cb7b014..2535e617 100644 --- a/inventory/sample.gcp.example.com.d/inventory/group_vars/all.yml +++ b/inventory/sample.gcp.example.com.d/inventory/group_vars/all.yml @@ -48,7 +48,7 @@ cloud_infrastructure: - us-central1-f name_prefix: infranode preemptible: false - docker_volume_size: 20 + docker_volume_size: 20 # docker_storage_block_device: "/dev/vdb" diff --git a/playbooks/openshift/gcp/provision.yml b/playbooks/openshift/gcp/provision.yml index 1be5f147..bdb00204 100644 --- a/playbooks/openshift/gcp/provision.yml +++ 
b/playbooks/openshift/gcp/provision.yml
@@ -29,5 +29,13 @@
 
 - name: install subscription manager
   package:
     name: subscription-manager
-    state: present
+    state: present
+
+- hosts: cluster_hosts
+  gather_facts: false
+  tasks:
+  - name: install yum-utils
+    package:
+      name: yum-utils
+      state: present
diff --git a/playbooks/openshift/pre-install.yml b/playbooks/openshift/pre-install.yml
index e7383e38..0820832e 100644
--- a/playbooks/openshift/pre-install.yml
+++ b/playbooks/openshift/pre-install.yml
@@ -71,5 +71,5 @@
   tasks:
   - set_fact:
       glusterfs_devices:
-      - "{{ hostvars['localhost'].cns_node_glusterfs_volume }}"
+      - "{{ hostvars['localhost'].cns_node_glusterfs_volume }}"
diff --git a/roles/manage-gcp-infra/defaults/main.yml b/roles/manage-gcp-infra/defaults/main.yml
index bfa3dd69..346a77fe 100644
--- a/roles/manage-gcp-infra/defaults/main.yml
+++ b/roles/manage-gcp-infra/defaults/main.yml
@@ -21,4 +21,13 @@
     name_prefix: infranode
     flavor: n1-standard-2
     preemptible: false
-    docker_volume_size: 20
\ No newline at end of file
+    docker_volume_size: 20
+  cnsnodes:
+    count: 0
+    flavor: n1-standard-2
+    zones: []
+    name_prefix: cnsnode
+    preemptible: false
+    docker_volume_size: 20
+    cns_volume_size: 100
+    cns_disk_type: pd-ssd
\ No newline at end of file
diff --git a/roles/manage-gcp-infra/tasks/create-infrastructure.yml b/roles/manage-gcp-infra/tasks/create-infrastructure.yml
index 73409e1a..b7ac95d3 100644
--- a/roles/manage-gcp-infra/tasks/create-infrastructure.yml
+++ b/roles/manage-gcp-infra/tasks/create-infrastructure.yml
@@ -34,3 +34,4 @@
   shell: gcloud deployment-manager deployments create openshift-{{ gcloud_env }} --config {{ tempdir.path }}/openshift-gcloud.yaml
   when: not deployment.stdout
 
+
diff --git a/roles/manage-gcp-infra/templates/openshift-gcloud.yaml.j2 
b/roles/manage-gcp-infra/templates/openshift-gcloud.yaml.j2
index 45c10e32..e3ddcc9d 100644
--- a/roles/manage-gcp-infra/templates/openshift-gcloud.yaml.j2
+++ b/roles/manage-gcp-infra/templates/openshift-gcloud.yaml.j2
@@ -1,5 +1,17 @@
 resources:
 
+- name: {{ gcloud_env }}-empty-10g-disk
+  properties:
+    zone: {{ gcloud_masters_zones[0] }}
+    type: zones/{{ gcloud_masters_zones[0] }}/diskTypes/pd-standard
+    sizeGb: 10
+  type: compute.v1.disk
+
+- name: {{ gcloud_env }}-empty-10g-image
+  properties:
+    sourceDisk: $(ref.{{ gcloud_env }}-empty-10g-disk.selfLink)
+  type: compute.v1.image
+
 # instance templates
 
 - name: {{ gcloud_env }}-master-template
@@ -47,6 +59,7 @@ resources:
       - {{ gcloud_env }}-master
      - {{ gcloud_env }}-node
   type: compute.v1.instanceTemplate
+
 - name: {{ gcloud_env }}-appnode-template
   properties:
     description: 'appnode-template'
@@ -92,6 +105,7 @@ resources:
      - {{ gcloud_env }}-appnode
      - {{ gcloud_env }}-node
   type: compute.v1.instanceTemplate
+
 - name: {{ gcloud_env }}-infranode-template
   properties:
     description: 'infranode-template'
@@ -138,6 +152,62 @@ resources:
      - {{ gcloud_env }}-node
   type: compute.v1.instanceTemplate
 
+- name: {{ gcloud_env }}-cnsnode-template
+  properties:
+    description: 'cnsnode-template'
+    properties:
+      canIpForward: false
+      disks:
+      - autoDelete: true
+        boot: true
+        index: 0
+        initializeParams:
+          diskSizeGb: {{ gcloud_vm_disk_size }}
+          sourceImage: https://www.googleapis.com/compute/v1/projects/rhel-cloud/global/images/family/rhel-7
+          diskType: pd-ssd
+      - autoDelete: true
+        boot: false
+        index: 1
+        deviceName: {{ gcloud_docker_volume_name }}
+        type: SCRATCH
+        initializeParams:
+          diskType: local-ssd
+      - autoDelete: true
+        boot: false
+        index: 2
+        deviceName: {{ gcloud_cns_volume_name }}
+        type: PERSISTENT
+        initializeParams:
+          sourceImage: $(ref.{{ gcloud_env }}-empty-10g-image.selfLink)
+          diskSizeGb: {{ gcloud_cnsnodes_cns_volume_size }}
+          diskType: {{ gcloud_cnsnodes_cns_disk_type }}
+      machineType: {{ gcloud_cnsnodes_flavor }} 
+ networkInterfaces: + - accessConfigs: + - name: external-nat + type: ONE_TO_ONE_NAT + network: https://www.googleapis.com/compute/v1/projects/{{ gcloud_project_name }}/global/networks/default + scheduling: + automaticRestart: true + onHostMaintenance: MIGRATE + preemptible: {{ gcloud_cnsnodes_preemptible }} + serviceAccounts: + - scopes: + - https://www.googleapis.com/auth/cloud.useraccounts.readonly + - https://www.googleapis.com/auth/compute + - https://www.googleapis.com/auth/devstorage.read_only + - https://www.googleapis.com/auth/logging.write + - https://www.googleapis.com/auth/monitoring.write + - https://www.googleapis.com/auth/service.management.readonly + - https://www.googleapis.com/auth/servicecontrol + tags: + items: + - {{ gcloud_env }} + - {{ gcloud_env }}-cnsnode + - {{ gcloud_env }}-node + type: compute.v1.instanceTemplate + + # group managers - name: {{ gcloud_env }}-master-igm @@ -183,6 +253,19 @@ resources: - zone: "zones/{{ i }}" {% endfor %} type: compute.beta.regionInstanceGroupManager + +- name: {{ gcloud_env }}-cnsnode-igm + properties: + baseInstanceName: {{ gcloud_env }}-cnsnode + instanceTemplate: $(ref.{{ gcloud_env }}-cnsnode-template.selfLink) + targetSize: {{ gcloud_cnsnodes_num }} + region: {{ gcloud_region }} + distributionPolicy: + zones: +{% for i in gcloud_cnsnodes_zones %} + - zone: "zones/{{ i }}" +{% endfor %} + type: compute.beta.regionInstanceGroupManager # master networking diff --git a/roles/manage-gcp-infra/vars/main.yaml b/roles/manage-gcp-infra/vars/main.yaml index 79a8bbe2..d800a776 100644 --- a/roles/manage-gcp-infra/vars/main.yaml +++ b/roles/manage-gcp-infra/vars/main.yaml @@ -4,31 +4,43 @@ gcloud_masters_num: "{{ cloud_infrastructure.masters.count }}" gcloud_appnodes_num: "{{ cloud_infrastructure.appnodes.count }}" gcloud_infranodes_num: "{{ cloud_infrastructure.infranodes.count }}" +gcloud_cnsnodes_num: "{{ cloud_infrastructure.cnsnodes.count }}" # flavor gcloud_masters_flavor: "{{ 
cloud_infrastructure.masters.flavor }}" gcloud_appnodes_flavor: "{{ cloud_infrastructure.appnodes.flavor }}" gcloud_infranodes_flavor: "{{ cloud_infrastructure.infranodes.flavor }}" +gcloud_cnsnodes_flavor: "{{ cloud_infrastructure.cnsnodes.flavor }}" # zones gcloud_masters_zones: "{{ cloud_infrastructure.masters.zones }}" gcloud_appnodes_zones: "{{ cloud_infrastructure.appnodes.zones }}" gcloud_infranodes_zones: "{{ cloud_infrastructure.infranodes.zones }}" +gcloud_cnsnodes_zones: "{{ cloud_infrastructure.cnsnodes.zones }}" # name prefix gcloud_masters_name_prefix: "{{ cloud_infrastructure.masters.name_prefix }}" gcloud_appnodes_name_prefix: "{{ cloud_infrastructure.appnodes.name_prefix }}" gcloud_infranodes_name_prefix: "{{ cloud_infrastructure.infranodes.name_prefix }}" +gcloud_cnsnodes_name_prefix: "{{ cloud_infrastructure.cnsnodes.name_prefix }}" #preemptitability gcloud_masters_preemptible: "{{ cloud_infrastructure.masters.preemptible }}" gcloud_appnodes_preemptible: "{{ cloud_infrastructure.appnodes.preemptible }}" gcloud_infranodes_preemptible: "{{ cloud_infrastructure.infranodes.preemptible }}" +gcloud_cnsnodes_preemptible: "{{ cloud_infrastructure.cnsnodes.preemptible }}" -#docker volume +#docker volume size gcloud_masters_docker_volume_size: "{{ cloud_infrastructure.masters.docker_volume_size }}" gcloud_appnodes_docker_volume_size: "{{ cloud_infrastructure.appnodes.docker_volume_size }}" gcloud_infranodes_docker_volume_size: "{{ cloud_infrastructure.infranodes.docker_volume_size }}" +gcloud_cnsnodes_docker_volume_size: "{{ cloud_infrastructure.cnsnodes.docker_volume_size }}" + +#cns volume size +gcloud_cnsnodes_cns_volume_size: "{{ cloud_infrastructure.cnsnodes.cns_volume_size }}" + +# cns disk type +gcloud_cnsnodes_cns_disk_type: "{{ cloud_infrastructure.cnsnodes.cns_disk_type }}" gcloud_region: "{{ cloud_infrastructure.region }}" gcloud_project_name: "{{ project_id }}" @@ -38,6 +50,7 @@ gcloud_master_external_fqdn: "{{ 
openshift_master_cluster_public_hostname }}" gcloud_master_internal_fqdn: "{{ openshift_master_cluster_hostname }}" gcloud_infranode_wildcard_fqdn: "*.{{ openshift_master_default_subdomain }}" gcloud_docker_volume_name: docker-storage +gcloud_cns_volume_name: cns-disk1 gcloud_vm_disk_size: 50 gcloud_registry_configmap_name: registry-config gcloud_registry_bucket_name: "{{ google_registry_bucket_name }}"