fix: secure push stepaction in deprovision task
Showing 1 changed file with 202 additions and 0 deletions.
tasks/rosa/hosted-cp/rosa-hcp-deprovision/0.2/rosa-hcp-deprovision.yaml
apiVersion: tekton.dev/v1
kind: Task
metadata:
  name: collect-artifacts-deprovision-rosa
spec:
  description: |
    This Tekton Task handles the collection of test artifacts and deprovisions the OpenShift cluster. The task performs the following steps:
    1. **Collect Artifacts**: Gathers artifacts if the pipeline did not succeed.
    2. **Inspect and Upload Artifacts**: Checks for sensitive information in the artifacts and uploads them to the OCI container registry.
    3. **Deprovision ROSA Cluster**: Deletes the OpenShift cluster if specified.
    4. **Remove Tags from Subnets**: Cleans up tags from AWS subnets associated with the cluster.
    5. **Remove Load Balancers**: Deletes any associated AWS load balancers.
  params:
    - name: test-name
      type: string
      description: The name of the test being executed.
    - name: ocp-login-command
      type: string
      description: Command to log in to the OpenShift cluster.
    - name: oci-container
      type: string
      description: The ORAS container registry URI where artifacts will be stored.
    - name: cluster-name
      type: string
      description: The name of the OpenShift cluster that is to be deleted.
    - name: konflux-test-infra-secret
      type: string
      description: The name of the secret containing credentials for testing infrastructure.
    - name: cloud-credential-key
      type: string
      description: The key within the konflux-test-infra secret where AWS ROSA configuration details are stored.
    - name: pipeline-aggregate-status
      type: string
      description: The status of the pipeline (e.g., Succeeded, Failed, Completed, None).
      default: None
  volumes:
    - name: konflux-test-infra-volume
      secret:
        secretName: konflux-test-infra
  steps:
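    # Collect cluster debug artifacts with the upstream gather-extra.sh script, but
    # only when the pipeline aggregate status is not Succeeded; onError: continue
    # keeps a failure here from blocking the remaining cleanup steps.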
    - name: collect-artifacts
      workingDir: /workspace/cluster-artifacts
      onError: continue
      image: quay.io/konflux-qe-incubator/konflux-qe-tools:latest
      script: |
        #!/bin/sh
        $(params.ocp-login-command)
        curl -sSL https://raw.githubusercontent.com/konflux-ci/konflux-qe-definitions/main/scripts/gather-extra.sh | bash
      when:
        - input: $(params.pipeline-aggregate-status)
          operator: notin
          values: ["Succeeded"]
    - name: secure-push-oci
      ref:
        resolver: git
        params:
          - name: url
            value: https://github.com/konflux-ci/tekton-integration-catalog.git
          - name: revision
            value: main
          - name: pathInRepo
            value: stepactions/secure-push-oci/0.1/secure-push-oci.yaml
      params:
        - name: workdir-path
          value: /workspace
        - name: oci-ref
          value: $(params.oci-container)
        - name: credentials-volume-name
          value: konflux-test-infra-volume
      when:
        - input: $(params.pipeline-aggregate-status)
          operator: notin
          values: ["Succeeded"]
    - name: deprovision-rosa
      image: quay.io/konflux-qe-incubator/konflux-qe-tools:latest
      onError: continue
      volumeMounts:
        - name: konflux-test-infra-volume
          mountPath: /usr/local/konflux-test-infra
      script: |
        set -o errexit
        set -o nounset
        set -o pipefail
        export CLUSTER_NAME REGION AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY ROSA_TOKEN
        CLUSTER_NAME=$(params.cluster-name)
        REGION=$(jq -r '.aws["region"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        AWS_ACCESS_KEY_ID=$(jq -r '.aws["access-key-id"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        AWS_SECRET_ACCESS_KEY=$(jq -r '.aws["access-key-secret"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        ROSA_TOKEN=$(jq -r '.aws["rosa-hcp"]["rosa-token"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        config_aws_creds() {
          aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID
          aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY
          aws configure set region "$REGION"
        }
        if [[ -n "$CLUSTER_NAME" ]]; then
          echo "INFO: [$(date +"%Y/%m/%d %H:%M:%S")] Started to destroy cluster [$CLUSTER_NAME]..."
          printf "INFO: Logging in to your Red Hat account...\n"
          config_aws_creds
          rosa login --token="$ROSA_TOKEN"
          # Trigger the deletion of the cluster without waiting for it to be completely deleted
          rosa delete cluster --region "$REGION" --cluster="$CLUSTER_NAME" -y
        else
          echo "INFO: No OCP cluster needs to be destroyed."
        fi
        echo "INFO: [$(date +"%Y/%m/%d %H:%M:%S")] Done"
    - name: remove-tag-from-subnets
      image: quay.io/konflux-qe-incubator/konflux-qe-tools:latest
      onError: continue
      volumeMounts:
        - name: konflux-test-infra-volume
          mountPath: /usr/local/konflux-test-infra
      script: |
        set -o errexit
        set -o nounset
        set -o pipefail
        CLUSTER_NAME=$(params.cluster-name)
        REGION=$(jq -r '.aws["region"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        AWS_ACCESS_KEY_ID=$(jq -r '.aws["access-key-id"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        AWS_SECRET_ACCESS_KEY=$(jq -r '.aws["access-key-secret"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        ROSA_TOKEN=$(jq -r '.aws["rosa-hcp"]["rosa-token"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        SUBNET_IDS=$(jq -r '.aws["rosa-hcp"]["subnets-ids"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID
        aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY
        aws configure set region "$REGION"
        echo "INFO: [$(date +"%Y/%m/%d %H:%M:%S")] Started to remove tags of cluster [$CLUSTER_NAME]..."
        printf "INFO: Logging in to your Red Hat account...\n"
        rosa login --token="$ROSA_TOKEN"
        if [[ -n "$CLUSTER_NAME" ]]; then
          cluster_id=$(rosa --region "$REGION" describe cluster --cluster="$CLUSTER_NAME" -o json | jq -r .id)
          echo "INFO: Cluster ID: $cluster_id"
          echo "INFO: Removing tag from subnets [$SUBNET_IDS]..."
          new_subnet_ids="${SUBNET_IDS//,/ }"
          aws --region "$REGION" ec2 delete-tags --resources $new_subnet_ids --tags Key="kubernetes.io/cluster/${cluster_id}"
          echo "INFO: [$(date +"%Y/%m/%d %H:%M:%S")] Done"
        else
          echo "INFO: No OCP cluster tag needs to be removed."
        fi
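    # Delete ELBv2 load balancers tagged api.openshift.com/name=<cluster-name>.
    # ARNs are checked in batches of 20 because DescribeTags cannot describe more
    # than 20 resources per call.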
    - name: remove-load-balancers
      image: quay.io/konflux-qe-incubator/konflux-qe-tools:latest
      onError: continue
      volumeMounts:
        - name: konflux-test-infra-volume
          mountPath: /usr/local/konflux-test-infra
      script: |
        set -o errexit
        set -o nounset
        set -o pipefail
        export CLUSTER_NAME REGION AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY
        CLUSTER_NAME=$(params.cluster-name)
        REGION=$(jq -r '.aws["region"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        AWS_ACCESS_KEY_ID=$(jq -r '.aws["access-key-id"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        AWS_SECRET_ACCESS_KEY=$(jq -r '.aws["access-key-secret"]' /usr/local/konflux-test-infra/$(params.cloud-credential-key))
        aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID
        aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY
        aws configure set region "$REGION"
        echo "INFO: [$(date +"%Y/%m/%d %H:%M:%S")] Started to remove load balancers of cluster [$CLUSTER_NAME]..."
        ELB_TAG_KEY="api.openshift.com/name"
        ELB_TAG_VALUE=$(params.cluster-name)
        # Get all load balancer ARNs
        all_arns=$(aws elbv2 describe-load-balancers --query 'LoadBalancers[*].LoadBalancerArn' --output text)
        # Process the ARNs in batches of 20 to avoid errors like:
        # An error occurred (ValidationError) when calling the DescribeTags operation:
        # Cannot have more than 20 resources described
        batch_size=20
        arns_to_delete=()
        for arn in $all_arns; do
          arns_to_delete+=($arn)
          if [ ${#arns_to_delete[@]} -eq $batch_size ]; then
            aws elbv2 describe-tags --resource-arns ${arns_to_delete[@]} \
              --query "TagDescriptions[?Tags[?Key=='$ELB_TAG_KEY' && Value=='$ELB_TAG_VALUE']].ResourceArn" --output text | while read matched_arn; do
              echo "Deleting load balancer with ARN: $matched_arn"
              aws elbv2 delete-load-balancer --load-balancer-arn $matched_arn
            done
            arns_to_delete=()
          fi
        done
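
For reference, here is a minimal sketch of how a pipeline could invoke this task from its finally section using the Tekton git resolver. The fragment is illustrative, not part of this commit: the provision-rosa task and its results, the quay.io artifact repository, and the cloud-credential-key value are assumptions, while $(tasks.status) is the standard Tekton variable that yields the aggregate status (Succeeded, Failed, Completed, None) expected by pipeline-aggregate-status.

# Fragment of a Pipeline spec (illustrative values)
finally:
  - name: cleanup-rosa-hcp
    taskRef:
      resolver: git
      params:
        - name: url
          value: https://github.com/konflux-ci/tekton-integration-catalog.git
        - name: revision
          value: main
        - name: pathInRepo
          value: tasks/rosa/hosted-cp/rosa-hcp-deprovision/0.2/rosa-hcp-deprovision.yaml
    params:
      - name: test-name
        value: $(context.pipelineRun.name)
      - name: ocp-login-command
        value: $(tasks.provision-rosa.results.ocp-login-command)  # hypothetical provisioning task/result
      - name: oci-container
        value: quay.io/example-org/test-artifacts:$(context.pipelineRun.name)  # illustrative registry
      - name: cluster-name
        value: $(tasks.provision-rosa.results.cluster-name)  # hypothetical provisioning task/result
      - name: konflux-test-infra-secret
        value: konflux-test-infra
      - name: cloud-credential-key
        value: aws-rosa-hcp  # assumed key name inside the secret
      - name: pipeline-aggregate-status
        value: $(tasks.status)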