Skip to content

Commit

Permalink
Merge pull request #672 from IBM/656-enable-storage-classes-for-fusio…
Browse files Browse the repository at this point in the history
…n-hci

656 enable storage classes for fusion hci
  • Loading branch information
fketelaars authored Apr 26, 2024
2 parents 74c1591 + 84e9e2c commit 2114923
Show file tree
Hide file tree
Showing 7 changed files with 352 additions and 83 deletions.
38 changes: 21 additions & 17 deletions automation-generators/existing-ocp/openshift/preprocessor.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,35 +15,38 @@
# ocp_storage_class_block: nfs-client

def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
g = GeneratorPreProcessor(attributes,fullConfig,moduleVariables)
# Initialize GeneratorPreProcessor
g = GeneratorPreProcessor(attributes, fullConfig, moduleVariables)

# Ensure required attributes are present
g('name').isRequired()
g('ocp_version').isRequired()
g('openshift_storage').isRequired()

# Now that we have reached this point, we can check the attribute details if the previous checks passed
# Validate attribute details if previous checks pass
if len(g.getErrors()) == 0:
fc = g.getFullConfig()
ge=g.getExpandedAttributes()
ge = g.getExpandedAttributes()

# OpenShift version must be 4.6 or higher
# Validate OpenShift version
if version.parse(str(ge['ocp_version'])) < version.parse("4.6"):
g.appendError(msg='ocp_version must be 4.6 or higher. If the OpenShift version is 4.10, specify ocp_version: "4.10"')

# Validate cloud_native_toolkit attribute
if 'cloud_native_toolkit' in ge:
if type(ge['cloud_native_toolkit']) != bool:
g.appendError(msg='Attribute cloud_native_toolkit must be either true or false if specified. Default is false.')

# Check infrastructure attributes
# Validate infrastructure attributes
if 'infrastructure' in ge:
if 'type' in ge['infrastructure']:
if ge['infrastructure']['type'] not in ['ibm-roks','aws-self-managed','aws-rosa','azure-aro','vsphere','standard','detect']:
g.appendError(msg='infrastructure.type must be ibm-roks, aws-self-managed, aws-rosa, azure-aro, vsphere, standard or detect')
g.appendError(msg='infrastructure.type must be ibm-roks, aws-self-managed, aws-rosa, azure-aro, vsphere, standard, or detect')
if 'processor_architecture' in ge['infrastructure']:
if ge['infrastructure']['processor_architecture'] not in ['amd64','ppc64le','s390x']:
g.appendError(msg='infrastructure.processor_architecture must be amd64, ppc64le or s390x')
g.appendError(msg='infrastructure.processor_architecture must be amd64, ppc64le, or s390x')

# Check upstream DNS server
# Validate upstream DNS server
if 'upstream_dns' in ge:
for dns in ge['upstream_dns']:
if 'name' not in dns:
Expand All @@ -57,38 +60,39 @@ def preprocessor(attributes=None, fullConfig=None, moduleVariables=None):
elif len(dns['dns_servers']) < 1:
g.appendError(msg='At least 1 dns_servers element must be specified for all upstream_dns configurations')

# Validate mcg attributes
if 'mcg' in ge:
mcg=ge['mcg']
mcg = ge['mcg']
if 'install' not in mcg:
g.appendError(msg='install property must be specified in openshift.mcg')
elif type(mcg['install']) != bool:
g.appendError(msg='Value mcg.install must be True or False')
if 'storage_type' not in mcg:
g.appendError(msg='storage_type property must be specified in openshift.mcg')
elif mcg['storage_type'] not in ['storage-class']:
elif mcg['storage_type'] != 'storage-class':
g.appendError(msg='Value mcg.storage_type must be storage-class')
if 'storage_class' not in mcg:
g.appendError(msg='storage_class property must be specified in openshift.mcg')

# Check openshift_storage atttributes
# Validate openshift_storage attributes
if len(ge['openshift_storage']) < 1:
g.appendError(msg='At least one openshift_storage element must be specified.')
for os in ge['openshift_storage']:
if "storage_name" not in os:
g.appendError(msg='storage_name must be specified for all openshift_storage elements')
if "storage_type" not in os:
g.appendError(msg='storage_type must be specified for all openshift_storage elements')
if "storage_type" in os and os['storage_type'] not in ['nfs','ocs','aws-elastic','pwx','custom','auto']:
g.appendError(msg='storage_type must be nfs, ocs, aws-elastic, custom or auto')
if "storage_type" in os and os['storage_type']=='custom':
if "storage_type" in os and os['storage_type'] not in ['nfs','ocs','aws-elastic','pwx','ibm-storage-fdf','custom','auto']:
g.appendError(msg='storage_type must be nfs, ocs, aws-elastic, ibm-storage-fdf, custom, or auto')
if "storage_type" in os and os['storage_type'] == 'custom':
if "ocp_storage_class_file" not in os:
g.appendError(msg='ocp_storage_class_file must be specified when storage_type is custom')
if "ocp_storage_class_block" not in os:
g.appendError(msg='ocp_storage_class_block must be specified when storage_type is custom')

# Return result containing updated attributes and errors
result = {
'attributes_updated': g.getExpandedAttributes(),
'errors': g.getErrors()
}
return result


return result
Original file line number Diff line number Diff line change
@@ -1,118 +1,101 @@
# Retrieve the openshift storage definitions from passed OpenShift cluster
# Retrieve the openshift storage definitions from the specified OpenShift cluster
- set_fact:
_openshift_storage: "{{ all_config.openshift | json_query(query) | first }}"
vars:
query: >-
[?name=='{{ _p_openshift_cluster_name }}'].openshift_storage
- name: Show OpenShift storage objects
- name: Display OpenShift storage objects
debug:
msg: "{{ _openshift_storage }}"

# Obtain the OpenShift storage element from the openshift storage definitions
# Obtain the OpenShift storage element corresponding to the specified storage name
- set_fact:
_selected_openshift_storage: "{{ _openshift_storage | json_query(query) | first | default({}) }}"
vars:
query: >-
[?storage_name=='{{ _p_openshift_storage_name }}']
when: (_p_openshift_storage_name | default('')) != ''
when: _p_openshift_storage_name | default('') != ''

# Default to the first storage definition found if no storage name passed
- set_fact:
_selected_openshift_storage: "{{ _openshift_storage | first | default({}) }}"
when: (_p_openshift_storage_name | default('')) == ''
when: _p_openshift_storage_name | default('') == ''

- name: Fail if the storage information for the passed storage name cannot be found
# Fail if the storage information for the passed storage name cannot be found
- name: Fail if storage information cannot be found
fail:
msg: "Storage information for storage name {{ _p_openshift_storage_name }} specified for OpenShift cluster {{ _p_openshift_cluster_name }} not found. Check the configuration."
msg: "Storage information for '{{ _p_openshift_storage_name }}' in OpenShift cluster '{{ _p_openshift_cluster_name }}' not found. Please verify the configuration."
when: _selected_openshift_storage == {}

- name: Show storage class construct
# Display the selected storage class construct
- name: Display selected storage class
debug:
var: _selected_openshift_storage

- name: Get storage classes from cluster
# Get all storage classes available in the OpenShift cluster
- name: Retrieve available storage classes
shell:
oc get sc -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}'
register: _openshift_storage_classes

- name: Show found storage classes
# Display found storage classes
- name: Display found storage classes
debug:
var: _openshift_storage_classes

# Determine the storage type
- set_fact:
_storage_type: "{{ _selected_openshift_storage.storage_type }}"

# Include tasks to auto-detect storage type if set to 'auto'
- include_tasks: auto-detect-storage-type.yml
when: _storage_type == 'auto'

- fail: msg="Valid values for storage type are 'nfs', 'ocs', 'pwx', 'aws-elastic' and 'custom'"
when:
- _storage_type != 'nfs'
- _storage_type != 'ocs'
- _storage_type != 'pwx'
- _storage_type != 'aws-elastic'
- _storage_type != 'ibm-classic-storage'
- _storage_type != 'custom'
# Fail if the detected storage type is not supported
- fail:
msg: "Valid storage types are 'nfs', 'ocs', 'pwx', 'aws-elastic', 'ibm-classic-storage', 'ibm-storage-fdf', and 'custom'"
when: _storage_type not in ['nfs', 'ocs', 'pwx', 'aws-elastic', 'ibm-classic-storage', 'ibm-storage-fdf', 'custom']

- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('managed-nfs-storage') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('managed-nfs-storage') }}"
when: _storage_type == 'nfs'

- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('ocs-storagecluster-cephfs') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('ocs-storagecluster-ceph-rbd') }}"
when: _storage_type == 'ocs'

- fail: msg="ocp_storage_class_file and ocp_storage_class_block are required when storage_type is set to 'custom'"
when:
- _selected_openshift_storage.ocp_storage_class_file is undefined
- _selected_openshift_storage.ocp_storage_class_block is undefined
- _storage_type == 'custom'

# Set storage class variables based on the detected storage type
- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block }}"
when: _storage_type == 'custom'

# Portworx storage classes vary according to cartridge, so are set in the relevant CR templates
- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('px-replicated') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('px-db') }}"
when: _storage_type == 'pwx'

# Choose appropriate AWS storage class
- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('efs-nfs-client') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('gp2') }}"
when: _storage_type == 'aws-elastic' and (_openshift_storage_classes.stdout | regex_search('^gp2$', multiline=True)) != None

- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('efs-nfs-client') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('gp2-csi') }}"
when: _storage_type == 'aws-elastic' and (_openshift_storage_classes.stdout | regex_search('^gp2-csi$', multiline=True)) != None

- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('efs-nfs-client') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('gp3-csi') }}"
when: _storage_type == 'aws-elastic' and (_openshift_storage_classes.stdout | regex_search('^gp3-csi$', multiline=True)) != None

- set_fact:
ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default('ibmc-file-gold-gid') }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default('ibmc-block-gold') }}"
when: _storage_type == 'ibm-classic-storage'

ocp_storage_class_file: "{{ _selected_openshift_storage.ocp_storage_class_file | default({
'nfs': 'managed-nfs-storage',
'ocs': 'ocs-storagecluster-cephfs',
'pwx': 'px-replicated',
'aws-elastic': 'efs-nfs-client',
'ibm-classic-storage': 'ibmc-file-gold-gid',
'ibm-storage-fdf': 'ibm-storage-fusion-cp-sc'
}[_storage_type]) }}"
ocp_storage_class_block: "{{ _selected_openshift_storage.ocp_storage_class_block | default({
'nfs': 'managed-nfs-storage',
'ocs': 'ocs-storagecluster-ceph-rbd',
'pwx': 'px-db',
'aws-elastic': 'gp2',
'ibm-classic-storage': 'ibmc-block-gold',
'ibm-storage-fdf': 'ibm-storage-fusion-cp-sc'
}[_storage_type]) }}"
when: _storage_type != 'custom'

# Display the selected storage class variables
- debug:
msg:
- "ocp_storage_class_file: {{ ocp_storage_class_file }}"
- "ocp_storage_class_block: {{ ocp_storage_class_block}}"
- "ocp_storage_class_file: {{ ocp_storage_class_file }}"
- "ocp_storage_class_block: {{ ocp_storage_class_block }}"

- name: Check if file storage class {{ ocp_storage_class_file }} exists in OpenShift
# Check if the file and block storage classes exist in OpenShift
- name: Check if file storage class exists in OpenShift
shell:
oc get sc {{ ocp_storage_class_file }}
register: file_class_status

- name: Check if block storage class {{ ocp_storage_class_block }} exists in OpenShift
- name: Check if block storage class exists in OpenShift
shell:
oc get sc {{ ocp_storage_class_block }}
register: block_class_status
when: ocp_storage_class_block != ocp_storage_class_file
1 change: 1 addition & 0 deletions docs/mkdocs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ nav:
- 'Azure ARO': ./10-use-deployer/3-run/azure-aro.md
- 'Azure Self-managed': ./10-use-deployer/3-run/azure-self-managed.md
- 'vSphere': ./10-use-deployer/3-run/vsphere.md
- 'Spectrum HCI': ./10-use-deployer/3-run/fusion-hci.md
- Post-run:
- 'Post-run changes': ./10-use-deployer/5-post-run/post-run.md
- 'Running commands': ./10-use-deployer/7-command/command.md
Expand Down
4 changes: 2 additions & 2 deletions docs/src/05-install/install.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ Then run the following command to build the container image.

This process will take 5-10 minutes to complete, and it will install all the prerequisites needed to run the automation, including Ansible, Python and required operating system packages. For the installation to work, the system on which the image is built must be connected to the internet.

### Downloading the Cloud Pak Deployer Image
## Downloading the Cloud Pak Deployer Image from Registry

To download the Cloud Pak Deployer image from the Quay.io registry, you can use the Docker command-line interface (CLI) or Podman.

Expand All @@ -72,7 +72,7 @@ podman pull quay.io/cloud-pak-deployer/cloud-pak-deployer

This command pulls the latest version of the Cloud Pak Deployer image from the Quay.io repository. Once downloaded, you can use this image to deploy Cloud Paks.

### Tags and Versions
## Tags and Versions

By default, the above command pulls the latest version of the Cloud Pak Deployer image. If you want to specify a particular version or tag, you can append it to the image name. For example:

Expand Down
Loading

0 comments on commit 2114923

Please sign in to comment.