From 16ec5bc394f1f1ef1495ff3a00191924e567547e Mon Sep 17 00:00:00 2001 From: dlpzx Date: Thu, 26 Dec 2024 10:58:48 +0100 Subject: [PATCH] Replace Integration tests - core --- backend/dataall/core/stacks/api/types.py | 1 + tests_new/integration_tests/README.md | 207 +++++++++++++----- tests_new/integration_tests/aws_clients/s3.py | 36 +++ .../integration_tests/aws_clients/sts.py | 57 +++++ tests_new/integration_tests/client.py | 33 +-- .../core/environment/queries.py | 55 ++++- .../core/environment/test_environment.py | 26 ++- .../core/environment/utils.py | 46 ++++ .../core/organizations/global_conftest.py | 8 +- .../core/organizations/queries.py | 6 +- .../core/organizations/test_organization.py | 1 - .../core/permissions/queries.py | 64 ++++++ .../core/permissions/test_permissions.py | 57 +++++ .../integration_tests/core/stack/conftest.py | 31 +++ .../integration_tests/core/stack/queries.py | 66 ++++++ .../core/stack/test_stack.py | 102 +++++++++ .../integration_tests/core/vpc/conftest.py | 21 ++ .../integration_tests/core/vpc/queries.py | 84 +++++++ .../integration_tests/core/vpc/test_vpc.py | 68 ++++++ 19 files changed, 884 insertions(+), 85 deletions(-) create mode 100644 tests_new/integration_tests/aws_clients/s3.py create mode 100644 tests_new/integration_tests/aws_clients/sts.py create mode 100644 tests_new/integration_tests/core/environment/utils.py create mode 100644 tests_new/integration_tests/core/permissions/queries.py create mode 100644 tests_new/integration_tests/core/permissions/test_permissions.py create mode 100644 tests_new/integration_tests/core/stack/conftest.py create mode 100644 tests_new/integration_tests/core/stack/test_stack.py create mode 100644 tests_new/integration_tests/core/vpc/conftest.py create mode 100644 tests_new/integration_tests/core/vpc/queries.py create mode 100644 tests_new/integration_tests/core/vpc/test_vpc.py diff --git a/backend/dataall/core/stacks/api/types.py b/backend/dataall/core/stacks/api/types.py index 1ddd96184..35a4a92ad 100644 --- a/backend/dataall/core/stacks/api/types.py +++ b/backend/dataall/core/stacks/api/types.py @@ -20,6 +20,7 @@ gql.Field(name='region', type=gql.NonNullableType(gql.String)), gql.Field(name='status', type=gql.String), gql.Field(name='stackid', type=gql.String), + gql.Field(name='updated', type=gql.AWSDateTime), gql.Field(name='link', type=gql.String, resolver=resolve_link), gql.Field(name='outputs', type=gql.String, resolver=resolve_outputs), gql.Field(name='resources', type=gql.String, resolver=resolve_resources), diff --git a/tests_new/integration_tests/README.md b/tests_new/integration_tests/README.md index ac8c17565..054b8fb31 100644 --- a/tests_new/integration_tests/README.md +++ b/tests_new/integration_tests/README.md @@ -10,60 +10,154 @@ Currently **we support only Cognito based deployments** but support for any IdP ## Pre-requisites -- A real deployment of data.all in AWS -- An SSM parameter (`/{resource_prefix/{env_name}/testdata`) with the following contents - ``` - { - "users": { - "testUserTenant": { - "username": "testUserTenant", - "password": "...", - "groups": [ - "DAAdministrators" - ] - }, - "testUser1": { - "username": "testUser1", - "password": "...", - "groups": [ - "testGroup1" - ] - }, - "testUser2": { - "username": "testUser2", - "password": "...", - "groups": [ - "testGroup2" - ] - }, - "testUser3": { - "username": "testUser3", - "password": "...", - "groups": [ - "testGroup3" - ] - }, - "testUser4": { - "username": "testUser4", - "password": "...", - "groups": [ - "testGroup4" - ] - } - 
},
-    "envs": {
-      "session_env1": {
-        "accountId": "...",
-        "region": "eu-central-1"
-      },
-      "session_env2": {
-        "accountId": "...",
-        "region": "eu-west-1"
-      }
-    }
-  }
-  ```
+- A real deployment of data.all in AWS.
+  - For this deployment the `cdk.json` flag `enable_pivot_role_auto_create` must be set to `true`.
+  - For this deployment the `config.json` flag `cdk_pivot_role_multiple_environments_same_account` must be set to `true` if an AWS account is going to be reused for multiple environments.
+  - The second test account is bootstrapped, and the first account is added to its trust policy in the target regions:
+    ```cdk bootstrap --trust <first-account-id> -c @aws-cdk/core:newStyleStackSynthesis=true --cloudformation-execution-policies arn:aws:iam::aws:policy/AdministratorAccess aws://<second-account-id>/<region>```
+  - An SSM parameter (`/dataall/{env_name}/testdata`) in the DEPLOYMENT ACCOUNT with the following contents
+    ```
+    {
+      "users": {
+        "testUserTenant": {
+          "username": "testUserTenant",
+          "password": "...",
+          "groups": [
+            "DAAdministrators"
+          ]
+        },
+        "testUser1": {
+          "username": "testUser1",
+          "password": "...",
+          "groups": [
+            "testGroup1"
+          ]
+        },
+        "testUser2": {
+          "username": "testUser2",
+          "password": "...",
+          "groups": [
+            "testGroup2"
+          ]
+        },
+        "testUser3": {
+          "username": "testUser3",
+          "password": "...",
+          "groups": [
+            "testGroup3"
+          ]
+        },
+        "testUser4": {
+          "username": "testUser4",
+          "password": "...",
+          "groups": [
+            "testGroup4"
+          ]
+        }
+      },
+      "envs": {
+        "session_env1": {
+          "accountId": "...",
+          "region": "eu-central-1"
+        },
+        "session_env2": {
+          "accountId": "...",
+          "region": "eu-west-1"
+        },
+        "persistent_env1": {
+          "accountId": "...",
+          "region": "us-east-1"
+        },
+        "persistent_cross_acc_env_1": {
+          "accountId": "...",
+          "region": "us-east-1"
+        },
+        "session_cross_acc_env_1": {
+          "accountId": "...",
+          "region": "eu-central-1"
+        }
+      },
+      "dashboards": {
+        "session_env1": {
+          "dashboardId": "..."
+        }
+      },
+      "redshift_connections": {
+        "connection_serverless_admin_session_env1": {
+          "namespace_id": "...",
+          "workgroup": "...",
+          "secret_arn": "..."
+        },
+        "connection_serverless_data_user_session_env1": {
+          "namespace_id": "...",
+          "workgroup": "...",
+          "secret_arn": "..."
+        },
+        "connection_cluster_admin_session_cross_acc_env_1": {
+          "cluster_id": "...",
+          "secret_arn": "..."
+        },
+        "connection_cluster_data_user_session_cross_acc_env_1": {
+          "cluster_id": "...",
+          "secret_arn": "..."
+        }
+      }
+    }
+    ```
+  - The pipeline will create the users/groups
+- For Redshift testing we require some pre-existing infrastructure:
+  - One Redshift serverless namespace+workgroup deployed in `session_env1` and one Redshift provisioned cluster in `session_cross_acc_env_1`
+  - The provisioned cluster MUST be encrypted and use the RA3 cluster type (check the [docs](https://docs.aws.amazon.com/redshift/latest/dg/datashare-overview.html) for other data sharing limitations)
+  - Both clusters must host the default `dev` database with the `public` schema.
+  - For each we need to ensure that the admin credentials are stored in Secrets Manager. The secrets MUST be tagged with the tag {key:dataall, value:True}. If you are going to use the Redshift Query Editor, then you will also need the tag {key:Redshift, value:any}
+  - For each we need to create a Redshift user (see SQL commands below) and store the credentials in Secrets Manager. The secrets MUST be tagged with the tag {key:dataall, value:True}. If you are going to use the Redshift Query Editor, then you will also need the tag {key:Redshift, value:any}
+  - For each we need to create a set of tables using the commands below
+  - For each we need to create a Redshift role as in the commands below
+
+Create the user and grant basic permissions using the admin connection
+```sql
+CREATE USER testuser PASSWORD 'Pass1Word!';
+GRANT USAGE ON SCHEMA public TO testuser;
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO testuser;
+```
+
+Create and attach the role using the admin connection
+```sql
+CREATE ROLE testrole;
+GRANT ROLE testrole TO testuser;
+```
+
+Create tables using the testuser connection
+```sql
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS region;
+
+CREATE TABLE region (
+  R_REGIONKEY bigint NOT NULL,
+  R_NAME varchar(25),
+  R_COMMENT varchar(152))
+diststyle all;
+
+CREATE TABLE nation (
+  N_NATIONKEY bigint NOT NULL,
+  N_NAME varchar(25),
+  N_REGIONKEY bigint,
+  N_COMMENT varchar(152))
+diststyle all;
+```
+
+### Dashboard Tests Pre-Requisites
+
+In order to run the tests on the dashboards module, the following steps are required:
+
+- Create an Enterprise QuickSight Subscription in the `session_env1` AWS Account
+- Update the QuickSight Account with a Reader Capacity Pricing Plan (required for generating embed URLs - `GenerateEmbedUrlForAnonymousUser`)
+- Create / Publish a QuickSight Dashboard
+- Create a QuickSight Group named `dataall` and give owner access of the published dashboard to the `dataall` group
+- Provide the `dashboardId` in the testdata SSM parameter as shown above
+
+Rather than failing if the above pre-requisites are not completed, the dashboard tests will be **skipped** if no QuickSight Account is detected in `session_env1`.
 
 ## Run tests
 
@@ -88,4 +182,9 @@ You can also run the tests locally by...
 
 ## Coverage
 
-At the moment integration tests only cover Organizations module as an example.
\ No newline at end of file
+At the moment integration tests cover:
+- Organizations
+- Environments
+- S3 Datasets
+- Notebooks
+- Worksheets
\ No newline at end of file
diff --git a/tests_new/integration_tests/aws_clients/s3.py b/tests_new/integration_tests/aws_clients/s3.py
new file mode 100644
index 000000000..080105edb
--- /dev/null
+++ b/tests_new/integration_tests/aws_clients/s3.py
@@ -0,0 +1,36 @@
+import logging
+from botocore.exceptions import ClientError
+
+log = logging.getLogger(__name__)
+
+
+class S3Client:
+    def __init__(self, session, account, region):
+        self._client = session.client('s3', region_name=region)
+        self._control_client = session.client('s3control', region_name=region)
+        self._resource = session.resource('s3', region_name=region)
+        self._account = account
+        self._region = region
+
+    def delete_bucket(self, bucket_name):
+        """
+        Delete an S3 bucket.
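+        All object versions and remaining objects are deleted first, along with any S3 access points,
+        and ClientError is logged rather than raised.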
+ :param bucket_name: Name of the S3 bucket to be deleted + :return: None + """ + try: + # Delete all objects in the bucket before deleting the bucket + bucket = self._resource.Bucket(bucket_name) + # Delete all object versions + bucket.object_versions.all().delete() + # Delete any remaining objects (if versioning was not enabled) + bucket.objects.all().delete() + # Delete any remaining access point + access_points = self._control_client.list_access_points(AccountId=self._account, Bucket=bucket_name)[ + 'AccessPointList' + ] + for access_point in access_points: + self._control_client.delete_access_point(AccountId=self._account, Name=access_point['Name']) + bucket.delete() + except ClientError as e: + log.exception(f'Error deleting S3 bucket: {e}') diff --git a/tests_new/integration_tests/aws_clients/sts.py b/tests_new/integration_tests/aws_clients/sts.py new file mode 100644 index 000000000..e0d77b0c6 --- /dev/null +++ b/tests_new/integration_tests/aws_clients/sts.py @@ -0,0 +1,57 @@ +from uuid import uuid4 +import boto3 +from boto3 import Session +from botocore.credentials import RefreshableCredentials +from botocore.session import get_session + +SESSION_EXPIRATION_TIME_IN_SECONDS = 3600 + + +class STSClient: + def __init__(self, role_arn, region, session_name=None): + self.role_arn = role_arn + self.region = region + self.session_name = session_name or uuid4().hex + + def _refresh_credentials(self): + params = { + 'RoleArn': self.role_arn, + 'RoleSessionName': self.session_name, + 'DurationSeconds': SESSION_EXPIRATION_TIME_IN_SECONDS, + } + sts_client = boto3.client('sts', region_name=self.region) + + response = sts_client.assume_role(**params).get('Credentials') + credentials = { + 'access_key': response.get('AccessKeyId'), + 'secret_key': response.get('SecretAccessKey'), + 'token': response.get('SessionToken'), + 'expiry_time': response.get('Expiration').isoformat(), + } + return credentials + + def get_refreshable_session(self) -> Session: + """ + Get refreshable boto3 session. 
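+        The returned session auto-refreshes its assumed-role credentials when they expire.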
+ """ + refreshable_credentials = RefreshableCredentials.create_from_metadata( + metadata=self._refresh_credentials(), + refresh_using=self._refresh_credentials, + method='sts-assume-role', + ) + + session = get_session() + session._credentials = refreshable_credentials + session.set_config_variable('region', self.region) + return Session(botocore_session=session) + + def get_role_session(self, session) -> Session: + sts_client = session.client('sts', region_name=self.region) + assumed_role_object = sts_client.assume_role(RoleArn=self.role_arn, RoleSessionName=self.session_name) + credentials = assumed_role_object['Credentials'] + + return boto3.Session( + aws_access_key_id=credentials['AccessKeyId'], + aws_secret_access_key=credentials['SecretAccessKey'], + aws_session_token=credentials['SessionToken'], + ) diff --git a/tests_new/integration_tests/client.py b/tests_new/integration_tests/client.py index 6f70f2b12..cb756ce9e 100644 --- a/tests_new/integration_tests/client.py +++ b/tests_new/integration_tests/client.py @@ -1,42 +1,49 @@ -import requests +import logging import os import uuid +from pprint import pformat from urllib.parse import parse_qs, urlparse + +import requests from munch import DefaultMunch -from retrying import retry -from integration_tests.errors import GqlError from oauthlib.oauth2 import WebApplicationClient from requests_oauthlib import OAuth2Session +from retrying import retry + +from integration_tests.errors import GqlError ENVNAME = os.getenv('ENVNAME', 'dev') +def _retry_if_connection_error(exception): + """Return True if we should retry, False otherwise""" + return isinstance(exception, requests.exceptions.ConnectionError) or isinstance(exception, requests.ReadTimeout) + + class Client: def __init__(self, username, password): self.username = username self.password = password self.access_token = self._get_jwt_tokens() - @staticmethod - def _retry_if_connection_error(exception): - """Return True if we should retry, False otherwise""" - return isinstance(exception, requests.exceptions.ConnectionError) or isinstance(exception, requests.ReadTimeout) - @retry( retry_on_exception=_retry_if_connection_error, stop_max_attempt_number=3, wait_random_min=1000, wait_random_max=3000, ) - def query(self, query: str): + def query(self, query: dict): graphql_endpoint = os.path.join(os.environ['API_ENDPOINT'], 'graphql', 'api') headers = {'accesskeyid': 'none', 'SecretKey': 'none', 'Authorization': f'Bearer {self.access_token}'} r = requests.post(graphql_endpoint, json=query, headers=headers) + response = r.json() + if errors := response.get('errors'): + if any((response.get('data', {}) or {}).values()): # check if there are data + logging.warning(f'{query=} returned both data and errors:\n {pformat(response)}') + else: + raise GqlError(errors) r.raise_for_status() - if errors := r.json().get('errors'): - raise GqlError(errors) - - return DefaultMunch.fromDict(r.json()) + return DefaultMunch.fromDict(response) def _get_jwt_tokens(self): token = uuid.uuid4() diff --git a/tests_new/integration_tests/core/environment/queries.py b/tests_new/integration_tests/core/environment/queries.py index 0d0de44c6..c817815e2 100644 --- a/tests_new/integration_tests/core/environment/queries.py +++ b/tests_new/integration_tests/core/environment/queries.py @@ -34,6 +34,7 @@ accountid region stackid + updated link outputs resources @@ -62,10 +63,8 @@ def create_environment(client, name, group, organizationUri, awsAccountId, regio 'region': region, 'description': 'Created for integration testing', 
'tags': tags, - 'parameters': [ - {'key': 'notebooksEnabled', 'value': 'true'}, - ], 'type': 'IntegrationTesting', + 'parameters': [], } }, 'query': f""" @@ -241,6 +240,33 @@ def add_consumption_role(client, env_uri, group_uri, consumption_role_name, iam_ return response.data.addConsumptionRoleToEnvironment +def list_environment_consumption_roles(client, env_uri, filter): + query = { + 'operationName': 'listEnvironmentConsumptionRoles', + 'variables': {'environmentUri': env_uri, 'filter': filter}, + 'query': """ + query listEnvironmentConsumptionRoles($environmentUri: String!, $filter: ConsumptionRoleFilter) { + listEnvironmentConsumptionRoles(environmentUri: $environmentUri, filter: $filter) { + count + page + pages + hasNext + hasPrevious + nodes { + consumptionRoleUri + consumptionRoleName + environmentUri + groupUri + IAMRoleArn + } + } + } + """, + } + response = client.query(query=query) + return response.data.listEnvironmentConsumptionRoles + + def remove_consumption_role(client, env_uri, consumption_role_uri): query = { 'operationName': 'removeConsumptionRoleFromEnvironment', @@ -262,3 +288,26 @@ def remove_consumption_role(client, env_uri, consumption_role_uri): } response = client.query(query=query) return response.data.removeConsumptionRoleFromEnvironment + + +def get_environment_access_token(client, env_uri, group_uri): + query = { + 'operationName': 'generateEnvironmentAccessToken', + 'variables': { + 'environmentUri': env_uri, + 'groupUri': group_uri, + }, + 'query': """ + query generateEnvironmentAccessToken( + $environmentUri: String! + $groupUri: String + ) { + generateEnvironmentAccessToken( + environmentUri: $environmentUri + groupUri: $groupUri + ) + } + """, + } + response = client.query(query=query) + return response.data.generateEnvironmentAccessToken diff --git a/tests_new/integration_tests/core/environment/test_environment.py b/tests_new/integration_tests/core/environment/test_environment.py index 55d44188a..934f8ec2f 100644 --- a/tests_new/integration_tests/core/environment/test_environment.py +++ b/tests_new/integration_tests/core/environment/test_environment.py @@ -12,8 +12,8 @@ remove_consumption_role, remove_group_from_env, ) -from integration_tests.core.stack.queries import update_stack -from integration_tests.core.stack.utils import check_stack_in_progress, check_stack_ready +from integration_tests.core.environment.utils import update_env_stack +from integration_tests.core.stack.queries import get_stack_logs from integration_tests.errors import GqlError log = logging.getLogger(__name__) @@ -51,15 +51,15 @@ def test_list_envs_invited(client2, session_env1, session_env2, session_id): def test_persistent_env_update(client1, persistent_env1): - # wait for stack to get to a final state before triggering an update - stack_uri = persistent_env1.stack.stackUri - env_uri = persistent_env1.environmentUri - check_stack_ready(client1, env_uri, stack_uri) - update_stack(client1, env_uri, 'environment') - # wait for stack to move to "in_progress" state - check_stack_in_progress(client1, env_uri, stack_uri) - stack = check_stack_ready(client1, env_uri, stack_uri) - assert_that(stack.status).is_equal_to('UPDATE_COMPLETE') + def get_last_log_ts(): + logs = get_stack_logs(client1, target_uri=persistent_env1.environmentUri, target_type='environment') + return datetime.fromisoformat(logs[-1]['timestamp']) + + ts_before = get_last_log_ts() + stack = update_env_stack(client1, persistent_env1) + assert_that(stack).contains_entry(status='UPDATE_COMPLETE') + ts_after = 
get_last_log_ts() + assert_that(ts_after).is_greater_than_or_equal_to(ts_before) def test_invite_group_on_env_no_org(client1, session_env2, group4): @@ -117,3 +117,7 @@ def test_add_consumption_role_unauthorized(client2, session_env2, group1): assert_that(add_consumption_role).raises(GqlError).when_called_with( client2, env_uri, group1, 'TestConsumptionRole', f'arn:aws:iam::{session_env2.AwsAccountId}:role/Admin' ).contains('UnauthorizedOperation', 'ADD_ENVIRONMENT_CONSUMPTION_ROLES', env_uri) + + +def test_create_crossaccount_env(client5, session_cross_acc_env_1, group5): + assert_that(session_cross_acc_env_1.stack.status).is_in('CREATE_COMPLETE', 'UPDATE_COMPLETE') diff --git a/tests_new/integration_tests/core/environment/utils.py b/tests_new/integration_tests/core/environment/utils.py new file mode 100644 index 000000000..0db7fb255 --- /dev/null +++ b/tests_new/integration_tests/core/environment/utils.py @@ -0,0 +1,46 @@ +from integration_tests.core.environment.queries import update_environment +from integration_tests.core.stack.utils import check_stack_ready, check_stack_in_progress +from integration_tests.core.stack.queries import update_stack + + +def set_env_params(client, env, **new_params): + old_params = {param.key: param.value for param in env.parameters} + updated_params = {**old_params, **new_params} + + # update env only if there are param updates + if old_params != updated_params: + new_params_list = [{'key': param[0], 'value': param[1]} for param in updated_params.items()] + env_uri = env.environmentUri + stack_uri = env.stack.stackUri + check_stack_ready(client, env_uri, stack_uri) + update_environment( + client, + env.environmentUri, + { + k: v + for k, v in env.items() + if k + in [ + 'description', + 'label', + 'resourcePrefix', + 'subnetIds', + 'tags', + 'vpcId', + ] + } + | {'parameters': new_params_list}, + ) + check_stack_in_progress(client, env_uri, stack_uri) + check_stack_ready(client, env_uri, stack_uri) + + +def update_env_stack(client, env): + stack_uri = env.stack.stackUri + env_uri = env.environmentUri + # wait for stack to get to a final state before triggering an update + check_stack_ready(client, env_uri, stack_uri) + update_stack(client, env_uri, 'environment') + # wait for stack to move to "in_progress" state + check_stack_in_progress(client, env_uri, stack_uri) + return check_stack_ready(client, env_uri, stack_uri) diff --git a/tests_new/integration_tests/core/organizations/global_conftest.py b/tests_new/integration_tests/core/organizations/global_conftest.py index 40dded5ca..adbd73eb0 100644 --- a/tests_new/integration_tests/core/organizations/global_conftest.py +++ b/tests_new/integration_tests/core/organizations/global_conftest.py @@ -3,11 +3,17 @@ @pytest.fixture(scope='session') -def org1(client1, group1, session_id): +def org1(client1, group1, group5, session_id): """ Session org owned by group1 """ org = create_organization(client1, 'organization1', group1, tags=[session_id]) + invite_team_to_organization( + client=client1, + organizationUri=org.organizationUri, + group=group5, + permissions=['LINK_ENVIRONMENT', 'INVITE_ORGANIZATION_GROUP', 'REMOVE_ORGANIZATION_GROUP'], + ) yield org archive_organization(client1, org.organizationUri) diff --git a/tests_new/integration_tests/core/organizations/queries.py b/tests_new/integration_tests/core/organizations/queries.py index 107d88439..1534a02cf 100644 --- a/tests_new/integration_tests/core/organizations/queries.py +++ b/tests_new/integration_tests/core/organizations/queries.py @@ -82,10 +82,12 @@ def 
update_organization(client, organizationUri): return response.data.updateOrganization -def invite_team_to_organization(client, organizationUri, group): +def invite_team_to_organization(client, organizationUri, group, permissions=None): query = { 'operationName': 'inviteGroupToOrganization', - 'variables': {'input': {'organizationUri': organizationUri, 'groupUri': group}}, + 'variables': { + 'input': {'organizationUri': organizationUri, 'groupUri': group, 'permissions': permissions or []} + }, 'query': """mutation inviteGroupToOrganization($input:InviteGroupToOrganizationInput!){ inviteGroupToOrganization(input:$input){ organizationUri diff --git a/tests_new/integration_tests/core/organizations/test_organization.py b/tests_new/integration_tests/core/organizations/test_organization.py index dcdae19a6..c06081ce2 100644 --- a/tests_new/integration_tests/core/organizations/test_organization.py +++ b/tests_new/integration_tests/core/organizations/test_organization.py @@ -41,7 +41,6 @@ def test_get_organization_organization_with_admin_team(client1, org1): assert_that(response.owner).is_equal_to(organization.owner) assert_that(response.SamlGroupName).is_equal_to(organization.SamlGroupName) assert_that(response.userRoleInOrganization).is_equal_to('Owner') - assert_that(response.stats.groups).is_equal_to(0) def test_get_organization_with_invited_team(client2, org2): diff --git a/tests_new/integration_tests/core/permissions/queries.py b/tests_new/integration_tests/core/permissions/queries.py new file mode 100644 index 000000000..130011f1d --- /dev/null +++ b/tests_new/integration_tests/core/permissions/queries.py @@ -0,0 +1,64 @@ +# TODO: This file will be replaced by using the SDK directly +def update_group_tenant_permissions(client, group_uri, permissions=[]): + query = { + 'operationName': 'updateGroupTenantPermissions', + 'variables': { + 'input': { + 'groupUri': group_uri, + 'permissions': permissions, + } + }, + 'query': """ + mutation updateGroupTenantPermissions( + $input: UpdateGroupTenantPermissionsInput! 
+ ) { + updateGroupTenantPermissions(input: $input) + } + """, + } + response = client.query(query=query) + return response.data.updateGroupTenantPermissions + + +def list_tenant_permissions(client): + query = { + 'operationName': 'listTenantPermissions', + 'variables': {}, + 'query': """ + query listTenantPermissions { + listTenantPermissions { + name + description + } + } + """, + } + response = client.query(query=query) + return response.data.listTenantPermissions + + +def list_tenant_groups(client, term=''): + query = { + 'operationName': 'listTenantGroups', + 'variables': {'filter': {'term': term}}, + 'query': """ + query listTenantGroups($filter: GroupFilter) { + listTenantGroups(filter: $filter) { + count + page + pages + hasNext + hasPrevious + nodes { + groupUri + tenantPermissions { + name + description + } + } + } + } + """, + } + response = client.query(query=query) + return response.data.listTenantGroups diff --git a/tests_new/integration_tests/core/permissions/test_permissions.py b/tests_new/integration_tests/core/permissions/test_permissions.py new file mode 100644 index 000000000..9d5ff3a50 --- /dev/null +++ b/tests_new/integration_tests/core/permissions/test_permissions.py @@ -0,0 +1,57 @@ +from assertpy import assert_that + +from integration_tests.core.permissions.queries import ( + update_group_tenant_permissions, + list_tenant_permissions, + list_tenant_groups, +) +from integration_tests.errors import GqlError + + +def test_list_tenant_permissions(clientTenant): + response = list_tenant_permissions(clientTenant) + assert_that(response).is_not_empty() + assert_that(len(response)).is_greater_than_or_equal_to(3) + assert_that(response).does_not_contain([None, '', False]) + assert_that([p.name for p in response]).does_not_contain([None, '', False]) + + +def test_list_tenant_permissions_unauthorized(client1): + assert_that(list_tenant_permissions).raises(GqlError).when_called_with(client1).contains( + 'UnauthorizedOperation', 'LIST_TENANT_TEAM_PERMISSIONS' + ) + + +def test_list_tenant_groups(clientTenant): + response = list_tenant_groups(clientTenant) + assert_that(response.count).is_greater_than_or_equal_to(4) + assert_that(response.nodes).is_not_empty() + assert_that(response.nodes[0]).contains_key('tenantPermissions') + ## Testing admin group DAAdministrators exists + admin_group = next(group for group in response.nodes if group.groupUri == 'DHAdmins') + assert_that(admin_group).contains_key('tenantPermissions') + + +def test_list_tenant_groups_unauthorized(client1): + assert_that(list_tenant_groups).raises(GqlError).when_called_with(client1).contains( + 'UnauthorizedOperation', 'LIST_TENANT_TEAMS' + ) + + +def test_update_group_tenant_permissions(clientTenant, group1): + # get group with permissions + response = list_tenant_groups(clientTenant, term=group1) + assert_that(response.count).is_equal_to(1) + assert_that(len(response.nodes[0].tenantPermissions)).is_greater_than_or_equal_to(1) + group1_perms = [p.name for p in response.nodes[0].tenantPermissions] + # update permissions + response = update_group_tenant_permissions(clientTenant, group1, group1_perms[:-1]) + assert_that(response).is_true() + # check permissions were updated + response = list_tenant_groups(clientTenant, term=group1) + assert_that(response.count).is_equal_to(1) + group1_p_updated = response.nodes[0] + assert_that(len(group1_p_updated.tenantPermissions)).is_equal_to(len(group1_perms) - 1) + assert_that(group1_p_updated.tenantPermissions).does_not_contain(group1_perms[-1]) + # update permissions back 
to initial state + update_group_tenant_permissions(clientTenant, group1, group1_perms) diff --git a/tests_new/integration_tests/core/stack/conftest.py b/tests_new/integration_tests/core/stack/conftest.py new file mode 100644 index 000000000..c6979c60d --- /dev/null +++ b/tests_new/integration_tests/core/stack/conftest.py @@ -0,0 +1,31 @@ +import pytest + +from integration_tests.core.stack.queries import update_key_value_tags + + +@pytest.fixture(scope='function') +def environment_tags_1(client1, session_env1, session_id): + tags = None + try: + tags = update_key_value_tags( + client1, + input={ + 'targetUri': session_env1.environmentUri, + 'targetType': 'environment', + 'tags': [ + {'key': 'key1', 'value': session_id, 'cascade': False}, + {'key': 'key2', 'value': session_id, 'cascade': True}, + ], + }, + ) + yield tags + finally: + if tags: + update_key_value_tags( + client1, + input={ + 'targetUri': session_env1.environmentUri, + 'targetType': 'environment', + 'tags': [], + }, + ) diff --git a/tests_new/integration_tests/core/stack/queries.py b/tests_new/integration_tests/core/stack/queries.py index f40ac253f..9757bf693 100644 --- a/tests_new/integration_tests/core/stack/queries.py +++ b/tests_new/integration_tests/core/stack/queries.py @@ -44,6 +44,7 @@ def get_stack(client, env_uri, stack_uri, target_uri, target_type): accountid region stackid + updated link outputs resources @@ -56,3 +57,68 @@ def get_stack(client, env_uri, stack_uri, target_uri, target_type): } response = client.query(query=query) return response.data.getStack + + +def get_stack_logs(client, target_uri, target_type): + query = { + 'operationName': 'getStackLogs', + 'variables': { + 'targetUri': target_uri, + 'targetType': target_type, + }, + 'query': """ + query getStackLogs($targetUri: String!, $targetType: String!) { + getStackLogs(targetUri: $targetUri, targetType: $targetType) { + message + timestamp + } + } + """, + } + response = client.query(query=query) + return response.data.getStackLogs + + +def list_key_value_tags(client, target_uri, target_type): + query = { + 'operationName': 'listKeyValueTags', + 'variables': { + 'targetUri': target_uri, + 'targetType': target_type, + }, + 'query': """ + query listKeyValueTags($targetUri: String!, $targetType: String!) { + listKeyValueTags(targetUri: $targetUri, targetType: $targetType) { + tagUri + targetUri + targetType + key + value + cascade + } + } + """, + } + response = client.query(query=query) + return response.data.listKeyValueTags + + +def update_key_value_tags(client, input): + query = { + 'operationName': 'updateKeyValueTags', + 'variables': {'input': input}, + 'query': """ + mutation updateKeyValueTags($input: UpdateKeyValueTagsInput!) { + updateKeyValueTags(input: $input) { + tagUri + targetUri + targetType + key + value + cascade + } + } + """, + } + response = client.query(query=query) + return response.data.updateKeyValueTags diff --git a/tests_new/integration_tests/core/stack/test_stack.py b/tests_new/integration_tests/core/stack/test_stack.py new file mode 100644 index 000000000..c8828d394 --- /dev/null +++ b/tests_new/integration_tests/core/stack/test_stack.py @@ -0,0 +1,102 @@ +from assertpy import assert_that + +from integration_tests.core.stack.queries import get_stack_logs, list_key_value_tags, update_key_value_tags +from integration_tests.errors import GqlError + + +## test_update_stack and test_get_stack are not needed as they are +## tested in each module that uses stacks (e.g. 
integration_tests.core.environment.test_environment.test_persistent_env_update) + + +def test_get_env_stack_logs(client1, session_env1): + response = get_stack_logs(client1, target_uri=session_env1.environmentUri, target_type='environment') + assert_that(response).is_not_empty() + + +def test_get_env_stack_logs_unauthorized(client2, session_env1): + assert_that(get_stack_logs).raises(GqlError).when_called_with( + client=client2, + target_uri=session_env1.environmentUri, + target_type='environment', + ).contains( + 'UnauthorizedOperation', + 'GET_ENVIRONMENT', + session_env1.environmentUri, + ) + + +def test_update_key_value_tags_add_tags(client1, environment_tags_1, session_id): + assert_that(len(environment_tags_1)).is_equal_to(2) + assert_that(environment_tags_1[0]).contains_entry(key='key1', value=session_id, cascade=False) + assert_that(environment_tags_1[1]).contains_entry(key='key2', value=session_id, cascade=True) + + +def test_update_key_value_tags_unauthorized(client2, session_env1, session_id): + assert_that(update_key_value_tags).raises(GqlError).when_called_with( + client=client2, + input={ + 'targetUri': session_env1.environmentUri, + 'targetType': 'environment', + 'tags': [ + {'key': 'key1U', 'value': session_id, 'cascade': False}, + {'key': 'key2U', 'value': session_id, 'cascade': True}, + ], + }, + ).contains( + 'UnauthorizedOperation', + 'UPDATE_ENVIRONMENT', + session_env1.environmentUri, + ) + + +def test_update_list_key_value_tags_add_tag_invalid_input(client1, session_env1, session_id): + assert_that(update_key_value_tags).raises(GqlError).when_called_with( + client=client1, + input={ + 'targetUri': session_env1.environmentUri, + 'targetType': 'environment', + 'tags': [ + {'key': 'keyDuplicated', 'value': session_id, 'cascade': False}, + {'key': 'keyDuplicated', 'value': session_id, 'cascade': True}, + ], + }, + ).contains( + 'UnauthorizedOperation', + 'SAVE_KEY_VALUE_TAGS', + 'Duplicate tag keys found', + ) + + +def test_update_key_value_tags_delete_tags(client1, session_env1, session_id): + response = update_key_value_tags( + client1, + input={ + 'targetUri': session_env1.environmentUri, + 'targetType': 'environment', + 'tags': [ + {'key': 'key1delete', 'value': session_id, 'cascade': False}, + {'key': 'key2delete', 'value': session_id, 'cascade': True}, + ], + }, + ) + assert_that(len(response)).is_equal_to(2) + # Test delete tag + response = update_key_value_tags( + client1, + input={ + 'targetUri': session_env1.environmentUri, + 'targetType': 'environment', + 'tags': [], + }, + ) + assert_that(response).is_equal_to([]) + # Test list tags after delete + response = list_key_value_tags(client1, target_uri=session_env1.environmentUri, target_type='environment') + assert_that(response).is_equal_to([]) + + +def test_list_key_value_tags(client1, environment_tags_1, session_env1, session_id): + response = list_key_value_tags(client1, target_uri=session_env1.environmentUri, target_type='environment') + assert_that(len(response)).is_equal_to(2) + assert_that(response[0]).contains_entry(key='key1', value=session_id, cascade=False) + assert_that(response[1]).contains_entry(key='key2', value=session_id, cascade=True) diff --git a/tests_new/integration_tests/core/vpc/conftest.py b/tests_new/integration_tests/core/vpc/conftest.py new file mode 100644 index 000000000..fbb88c8cb --- /dev/null +++ b/tests_new/integration_tests/core/vpc/conftest.py @@ -0,0 +1,21 @@ +import pytest +from integration_tests.core.vpc.queries import create_network, delete_network + + 
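+# network1 registers a dummy network (fake VPC id) in session_env1 owned by group1 and deletes it after each test.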
+@pytest.fixture(scope='function') +def network1(client1, group1, session_env1, session_id): + network = None + try: + network = create_network( + client1, + name='testVpc1', + vpc_id='someId', + public_subnets=['testSubnet1'], + environment_uri=session_env1.environmentUri, + group=group1, + tags=[session_id], + ) + yield network + finally: + if network: + delete_network(client1, vpc_uri=network.vpcUri) diff --git a/tests_new/integration_tests/core/vpc/queries.py b/tests_new/integration_tests/core/vpc/queries.py new file mode 100644 index 000000000..4069c43d4 --- /dev/null +++ b/tests_new/integration_tests/core/vpc/queries.py @@ -0,0 +1,84 @@ +# TODO: This file will be replaced by using the SDK directly + +NETWORK_TYPE = """ +VpcId +vpcUri +environment { + environmentUri + label + AwsAccountId + region +} +label +owner +name +description +tags +AwsAccountId +region +privateSubnetIds +publicSubnetIds +SamlGroupName +default +""" + + +def create_network(client, name, environment_uri, group, vpc_id, public_subnets=[], private_subnets=[], tags=[]): + query = { + 'operationName': 'createNetwork', + 'variables': { + 'input': { + 'label': name, + 'environmentUri': environment_uri, + 'vpcId': vpc_id, + 'publicSubnetIds': public_subnets, + 'privateSubnetIds': private_subnets, + 'SamlGroupName': group, + 'description': 'Created for integration testing', + 'tags': tags, + } + }, + 'query': f"""mutation createNetwork($input: NewVpcInput!) {{ + createNetwork(input: $input) {{ + {NETWORK_TYPE} + }} + }} + """, + } + response = client.query(query=query) + return response.data.createNetwork + + +def delete_network(client, vpc_uri): + query = { + 'operationName': 'deleteNetwork', + 'variables': {'vpcUri': vpc_uri}, + 'query': """mutation deleteNetwork($vpcUri: String!) { + deleteNetwork(vpcUri: $vpcUri) + } + """, + } + response = client.query(query=query) + return response.data.deleteNetwork + + +def list_environment_networks(client, environment_uri, term=''): + query = { + 'operationName': 'listEnvironmentNetworks', + 'variables': {'environmentUri': environment_uri, 'filter': {'term': term}}, + 'query': f"""query listEnvironmentNetworks($environmentUri: String!, $filter: VpcFilter!) 
{{ + listEnvironmentNetworks(environmentUri: $environmentUri, filter: $filter) {{ + count + page + pages + hasNext + hasPrevious + nodes {{ + {NETWORK_TYPE} + }} + }} + }} + """, + } + response = client.query(query=query) + return response.data.listEnvironmentNetworks diff --git a/tests_new/integration_tests/core/vpc/test_vpc.py b/tests_new/integration_tests/core/vpc/test_vpc.py new file mode 100644 index 000000000..eb9a78d8e --- /dev/null +++ b/tests_new/integration_tests/core/vpc/test_vpc.py @@ -0,0 +1,68 @@ +from assertpy import assert_that + +from integration_tests.errors import GqlError +from integration_tests.core.vpc.queries import create_network, delete_network, list_environment_networks + + +def test_create_network(network1, session_id): + assert_that(network1).contains_entry(label='testVpc1', tags=[session_id], VpcId='someId') + assert_that(network1.vpcUri).is_not_none() + + +def test_create_network_unauthorized(client2, group2, session_env1, session_id): + assert_that(create_network).raises(GqlError).when_called_with( + client2, + name='testVpc2', + vpc_id='someId2', + public_subnets=['testSubnet2'], + environment_uri=session_env1.environmentUri, + group=group2, + tags=[session_id], + ).contains('UnauthorizedOperation', 'CREATE_NETWORK', session_env1.environmentUri) + + +def test_create_duplicated_network_invalid(client1, group1, session_env1, session_id, network1): + assert_that(create_network).raises(GqlError).when_called_with( + client1, + name='testVpcDuplicated2', + vpc_id='someId', + public_subnets=['testSubnet1'], + environment_uri=session_env1.environmentUri, + group=group1, + tags=[session_id], + ).contains('ResourceAlreadyExists', 'CREATE_NETWORK', 'someId') + + +def test_delete_network(client1, group1, session_env1, session_id): + response = create_network( + client1, + name='testVpcDelete', + vpc_id='someIdDelete', + public_subnets=['testSubnet1'], + environment_uri=session_env1.environmentUri, + group=group1, + tags=[session_id], + ) + assert_that(response.vpcUri).is_not_none() + response = delete_network(client1, vpc_uri=response.vpcUri) + assert_that(response).is_true() + + +def test_delete_network_unauthorized(client2, network1): + assert_that(delete_network).raises(GqlError).when_called_with( + client2, + vpc_uri=network1.vpcUri, + ).contains('UnauthorizedOperation', 'DELETE_NETWORK', network1.vpcUri) + + +def test_list_environment_networks(client1, network1, session_env1, session_id): + response = list_environment_networks(client1, environment_uri=session_env1.environmentUri, term=session_id) + assert_that(response.count).is_equal_to(1) + assert_that(response.nodes[0]).contains_entry(label='testVpc1', VpcId='someId', vpcUri=network1.vpcUri) + + +def test_list_environment_networks_unauthorized(client2, network1, session_env1): + assert_that(list_environment_networks).raises(GqlError).when_called_with( + client2, + environment_uri=session_env1.environmentUri, + ).contains('UnauthorizedOperation', 'LIST_ENVIRONMENT_NETWORKS', session_env1.environmentUri)