From d1c61c34f62d8ee51964f47877802d070dfa9e98 Mon Sep 17 00:00:00 2001 From: Katrina Rogan Date: Tue, 24 Sep 2019 11:16:15 -0700 Subject: [PATCH] Initial Commit --- .dockerignore | 1 + .gitignore | 8 + .golangci.yml | 27 + .travis.yml | 27 + CODE_OF_CONDUCT.md | 3 + Dockerfile | 33 + Gopkg.lock | 1191 +++++++++++ Gopkg.toml | 110 + LICENSE | 202 ++ Makefile | 37 + NOTICE | 4 + README.rst | 8 + boilerplate/lyft/docker_build/Makefile | 12 + boilerplate/lyft/docker_build/Readme.rst | 23 + boilerplate/lyft/docker_build/docker_build.sh | 67 + .../golang_dockerfile/Dockerfile.GoTemplate | 33 + boilerplate/lyft/golang_dockerfile/Readme.rst | 16 + boilerplate/lyft/golang_dockerfile/update.sh | 13 + boilerplate/lyft/golang_test_targets/Makefile | 38 + .../lyft/golang_test_targets/Readme.rst | 31 + .../lyft/golang_test_targets/goimports | 8 + boilerplate/update.cfg | 3 + boilerplate/update.sh | 53 + cmd/entrypoints/clusterresource.go | 111 + cmd/entrypoints/migrate.go | 133 ++ cmd/entrypoints/root.go | 83 + cmd/entrypoints/serve.go | 219 ++ cmd/main.go | 14 + flyteadmin_config.yaml | 141 ++ pkg/async/notifications/email.go | 54 + pkg/async/notifications/email_test.go | 79 + pkg/async/notifications/factory.go | 111 + .../implementations/aws_emailer.go | 86 + .../implementations/aws_emailer_test.go | 121 ++ .../implementations/noop_notifications.go | 54 + .../implementations/processor.go | 160 ++ .../implementations/processor_test.go | 140 ++ .../implementations/publisher.go | 52 + .../implementations/publisher_test.go | 76 + pkg/async/notifications/interfaces/emailer.go | 13 + .../notifications/interfaces/processor.go | 17 + .../notifications/interfaces/publisher.go | 27 + pkg/async/notifications/mocks/emailer.go | 24 + pkg/async/notifications/mocks/processor.go | 47 + pkg/async/notifications/mocks/publisher.go | 24 + .../schedule/aws/cloud_watch_scheduler.go | 263 +++ .../aws/cloud_watch_scheduler_test.go | 272 +++ .../interfaces/cloud_watch_event_client.go | 11 + 
.../mocks/mock_cloud_watch_event_client.go | 71 + pkg/async/schedule/aws/serialization.go | 86 + pkg/async/schedule/aws/serialization_test.go | 61 + pkg/async/schedule/aws/shared.go | 24 + pkg/async/schedule/aws/shared_test.go | 18 + pkg/async/schedule/aws/workflow_executor.go | 312 +++ .../schedule/aws/workflow_executor_test.go | 307 +++ pkg/async/schedule/factory.go | 98 + .../schedule/interfaces/event_scheduler.go | 25 + .../schedule/interfaces/workflow_executor.go | 7 + .../schedule/mocks/mock_event_scheduler.go | 42 + .../schedule/mocks/mock_workflow_executor.go | 28 + pkg/async/schedule/noop/event_scheduler.go | 30 + pkg/async/schedule/noop/workflow_executor.go | 15 + pkg/clusterresource/controller.go | 348 +++ pkg/clusterresource/controller_test.go | 108 + pkg/common/cloud.go | 10 + pkg/common/constants.go | 6 + pkg/common/entity.go | 13 + pkg/common/executions.go | 59 + pkg/common/executions_test.go | 17 + pkg/common/filters.go | 280 +++ pkg/common/filters_test.go | 131 ++ pkg/common/mocks/storage.go | 71 + pkg/common/sorting.go | 39 + pkg/common/sorting_test.go | 26 + pkg/config/config.go | 44 + pkg/config/config_flags.go | 22 + pkg/config/config_flags_test.go | 191 ++ pkg/data/factory.go | 54 + pkg/data/implementations/aws_remote_url.go | 104 + .../implementations/aws_remote_url_test.go | 85 + pkg/data/implementations/noop_remote_url.go | 34 + .../implementations/noop_remote_url_test.go | 42 + pkg/data/interfaces/remote.go | 12 + pkg/data/mocks/remote.go | 24 + pkg/errors/errors.go | 93 + pkg/errors/errors_test.go | 29 + pkg/flytek8s/client.go | 76 + pkg/manager/impl/execution_manager.go | 810 +++++++ pkg/manager/impl/execution_manager_test.go | 1887 +++++++++++++++++ pkg/manager/impl/executions/queues.go | 248 +++ pkg/manager/impl/executions/queues_test.go | 179 ++ pkg/manager/impl/launch_plan_manager.go | 560 +++++ pkg/manager/impl/launch_plan_manager_test.go | 1322 ++++++++++++ pkg/manager/impl/node_execution_manager.go | 377 ++++ 
.../impl/node_execution_manager_test.go | 805 +++++++ pkg/manager/impl/project_manager.go | 69 + pkg/manager/impl/project_manager_test.go | 125 ++ pkg/manager/impl/shared/constants.go | 33 + pkg/manager/impl/shared/errors.go | 21 + pkg/manager/impl/task_execution_manager.go | 293 +++ .../impl/task_execution_manager_test.go | 905 ++++++++ pkg/manager/impl/task_manager.go | 255 +++ pkg/manager/impl/task_manager_test.go | 400 ++++ pkg/manager/impl/testutils/config.go | 29 + pkg/manager/impl/testutils/constants.go | 8 + pkg/manager/impl/testutils/mock_closures.go | 50 + pkg/manager/impl/testutils/mock_requests.go | 290 +++ pkg/manager/impl/testutils/repository.go | 28 + pkg/manager/impl/util/digests.go | 51 + pkg/manager/impl/util/digests_test.go | 174 ++ pkg/manager/impl/util/filters.go | 267 +++ pkg/manager/impl/util/filters_test.go | 194 ++ pkg/manager/impl/util/shared.go | 215 ++ pkg/manager/impl/util/shared_test.go | 385 ++++ pkg/manager/impl/util/testdata/workflow.json | 722 +++++++ .../impl/validation/execution_validator.go | 149 ++ .../validation/execution_validator_test.go | 224 ++ .../impl/validation/launch_plan_validator.go | 156 ++ .../validation/launch_plan_validator_test.go | 370 ++++ .../validation/node_execution_validator.go | 41 + .../node_execution_validator_test.go | 131 ++ .../impl/validation/project_validator.go | 60 + .../impl/validation/project_validator_test.go | 116 + .../validation/task_execution_validator.go | 56 + .../task_execution_validator_test.go | 171 ++ pkg/manager/impl/validation/task_validator.go | 339 +++ .../impl/validation/task_validator_test.go | 557 +++++ pkg/manager/impl/validation/validation.go | 181 ++ .../impl/validation/validation_test.go | 290 +++ .../impl/validation/workflow_validator.go | 63 + .../validation/workflow_validator_test.go | 81 + pkg/manager/impl/workflow_manager.go | 353 +++ pkg/manager/impl/workflow_manager_test.go | 575 +++++ pkg/manager/interfaces/execution.go | 24 + pkg/manager/interfaces/launch_plan.go 
| 26 + pkg/manager/interfaces/node_execution.go | 18 + pkg/manager/interfaces/project.go | 13 + pkg/manager/interfaces/task.go | 16 + pkg/manager/interfaces/task_execution.go | 17 + pkg/manager/interfaces/workflow.go | 16 + pkg/manager/mocks/execution.go | 120 ++ pkg/manager/mocks/launch_plan.go | 115 + pkg/manager/mocks/node_execution.go | 86 + pkg/manager/mocks/project.go | 39 + pkg/manager/mocks/task.go | 50 + pkg/manager/mocks/task_execution.go | 75 + pkg/manager/mocks/workflow.go | 41 + pkg/repositories/config/database.go | 12 + pkg/repositories/config/migration_models.go | 82 + pkg/repositories/config/migrations.go | 112 + pkg/repositories/config/postgres.go | 81 + pkg/repositories/config/postgres_test.go | 21 + pkg/repositories/config/seed_data.go | 26 + pkg/repositories/database_test.go | 30 + pkg/repositories/errors/error_transformer.go | 10 + pkg/repositories/errors/errors.go | 26 + pkg/repositories/errors/postgres.go | 95 + pkg/repositories/errors/postgres_test.go | 43 + .../errors/test_error_transformer.go | 31 + pkg/repositories/factory.go | 48 + pkg/repositories/gormimpl/common.go | 116 + pkg/repositories/gormimpl/execution_repo.go | 150 ++ .../gormimpl/execution_repo_test.go | 409 ++++ pkg/repositories/gormimpl/launch_plan_repo.go | 196 ++ .../gormimpl/launch_plan_repo_test.go | 494 +++++ pkg/repositories/gormimpl/metrics.go | 33 + .../gormimpl/node_execution_repo.go | 179 ++ .../gormimpl/node_execution_repo_test.go | 423 ++++ pkg/repositories/gormimpl/project_repo.go | 72 + .../gormimpl/project_repo_test.go | 83 + .../gormimpl/task_execution_repo.go | 140 ++ .../gormimpl/task_execution_repo_test.go | 230 ++ pkg/repositories/gormimpl/task_repo.go | 135 ++ pkg/repositories/gormimpl/task_repo_test.go | 264 +++ pkg/repositories/gormimpl/test_utils.go | 30 + pkg/repositories/gormimpl/workflow_repo.go | 134 ++ .../gormimpl/workflow_repo_test.go | 270 +++ pkg/repositories/interfaces/common.go | 29 + pkg/repositories/interfaces/execution_repo.go | 29 + 
.../interfaces/launch_plan_repo.go | 37 + .../interfaces/node_execution_repo.go | 37 + pkg/repositories/interfaces/project_repo.go | 17 + .../interfaces/task_execution_repo.go | 29 + pkg/repositories/interfaces/task_repo.go | 25 + pkg/repositories/interfaces/workflow_repo.go | 23 + pkg/repositories/mocks/execution_repo.go | 96 + pkg/repositories/mocks/launch_plan_repo.go | 107 + pkg/repositories/mocks/node_execution_repo.go | 85 + pkg/repositories/mocks/project_repo.go | 44 + pkg/repositories/mocks/repository.go | 56 + pkg/repositories/mocks/task_execution_repo.go | 68 + pkg/repositories/mocks/task_repo.go | 79 + pkg/repositories/mocks/workflow_repo.go | 80 + pkg/repositories/models/base_model.go | 13 + pkg/repositories/models/execution.go | 46 + pkg/repositories/models/execution_event.go | 13 + pkg/repositories/models/launch_plan.go | 35 + pkg/repositories/models/node_execution.go | 36 + .../models/node_execution_event.go | 13 + pkg/repositories/models/project.go | 7 + pkg/repositories/models/task.go | 21 + pkg/repositories/models/task_execution.go | 39 + pkg/repositories/models/workflow.go | 21 + pkg/repositories/postgres_repo.go | 60 + pkg/repositories/transformers/execution.go | 194 ++ .../transformers/execution_event.go | 27 + .../transformers/execution_event_test.go | 45 + .../transformers/execution_test.go | 470 ++++ pkg/repositories/transformers/launch_plan.go | 133 ++ .../transformers/launch_plan_test.go | 267 +++ .../transformers/node_execution.go | 200 ++ .../transformers/node_execution_event.go | 30 + .../transformers/node_execution_event_test.go | 49 + .../transformers/node_execution_test.go | 257 +++ pkg/repositories/transformers/project.go | 36 + pkg/repositories/transformers/project_test.go | 74 + pkg/repositories/transformers/task.go | 81 + .../transformers/task_execution.go | 211 ++ .../transformers/task_execution_test.go | 503 +++++ pkg/repositories/transformers/task_test.go | 148 ++ pkg/repositories/transformers/workflow.go | 82 + 
.../transformers/workflow_test.go | 139 ++ pkg/rpc/adminservice/base.go | 163 ++ pkg/rpc/adminservice/execution.go | 139 ++ pkg/rpc/adminservice/launch_plan.go | 154 ++ pkg/rpc/adminservice/metrics.go | 152 ++ pkg/rpc/adminservice/node_execution.go | 110 + pkg/rpc/adminservice/project.go | 46 + pkg/rpc/adminservice/task.go | 93 + pkg/rpc/adminservice/task_execution.go | 113 + pkg/rpc/adminservice/tests/execution_test.go | 325 +++ .../adminservice/tests/launch_plan_test.go | 162 ++ .../adminservice/tests/node_execution_test.go | 252 +++ pkg/rpc/adminservice/tests/project_test.go | 55 + .../adminservice/tests/task_execution_test.go | 349 +++ pkg/rpc/adminservice/tests/task_test.go | 94 + pkg/rpc/adminservice/tests/util.go | 31 + pkg/rpc/adminservice/tests/workflow_test.go | 64 + pkg/rpc/adminservice/util/metrics.go | 67 + pkg/rpc/adminservice/util/transformers.go | 20 + .../adminservice/util/transformers_test.go | 40 + pkg/rpc/adminservice/workflow.go | 94 + pkg/runtime/application_config_provider.go | 76 + pkg/runtime/cluster_config_provider.go | 45 + pkg/runtime/cluster_resource_provider.go | 48 + pkg/runtime/cluster_resource_provider_test.go | 40 + pkg/runtime/config_provider_test.go | 42 + pkg/runtime/configuration_provider.go | 56 + pkg/runtime/execution_queue_provider.go | 38 + .../interfaces/application_configuration.go | 104 + .../interfaces/cluster_configuration.go | 51 + .../cluster_resource_configuration.go | 32 + pkg/runtime/interfaces/configuration.go | 12 + pkg/runtime/interfaces/queue_configuration.go | 35 + .../registration_validation_provider.go | 16 + .../interfaces/task_resource_configuration.go | 14 + pkg/runtime/interfaces/whitelist.go | 14 + .../mocks/mock_application_provider.go | 62 + .../mocks/mock_cluster_resource_provider.go | 28 + .../mocks/mock_configuration_provider.go | 64 + .../mocks/mock_execution_queue_provider.go | 25 + .../mock_registration_validation_provider.go | 30 + .../mocks/mock_task_resource_provider.go | 22 + 
pkg/runtime/mocks/mock_whitelist_provider.go | 15 + .../registration_validation_provider.go | 53 + pkg/runtime/task_resource_provider.go | 41 + .../testdata/cluster_resource_config.yaml | 11 + pkg/runtime/testdata/clusters_config.yaml | 15 + pkg/runtime/testdata/config.yaml | 20 + pkg/runtime/whitelist_provider.go | 29 + pkg/workflowengine/impl/compiler.go | 46 + pkg/workflowengine/impl/interface_provider.go | 49 + .../impl/interface_provider_test.go | 76 + pkg/workflowengine/impl/propeller_executor.go | 193 ++ .../impl/propeller_executor_test.go | 397 ++++ pkg/workflowengine/interfaces/compiler.go | 16 + pkg/workflowengine/interfaces/executor.go | 32 + pkg/workflowengine/mocks/mock_compiler.go | 62 + pkg/workflowengine/mocks/mock_executor.go | 44 + sampleresourcetemplates/docker.yaml | 8 + sampleresourcetemplates/imagepullsecrets.yaml | 7 + sampleresourcetemplates/namespace.yaml | 7 + script/integration/k8s/integration.yaml | 433 ++++ script/integration/k8s/main.sh | 46 + script/integration/launch.sh | 40 + tests/bootstrap.go | 66 + tests/execution_test.go | 280 +++ tests/helpers.go | 28 + tests/launch_plan_test.go | 898 ++++++++ tests/node_execution_test.go | 283 +++ tests/project.go | 45 + tests/shared.go | 6 + tests/task_execution_test.go | 382 ++++ tests/task_test.go | 540 +++++ tests/workflow_test.go | 344 +++ 295 files changed, 38548 insertions(+) create mode 100644 .dockerignore create mode 100644 .gitignore create mode 100644 .golangci.yml create mode 100644 .travis.yml create mode 100644 CODE_OF_CONDUCT.md create mode 100644 Dockerfile create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 NOTICE create mode 100644 README.rst create mode 100644 boilerplate/lyft/docker_build/Makefile create mode 100644 boilerplate/lyft/docker_build/Readme.rst create mode 100755 boilerplate/lyft/docker_build/docker_build.sh create mode 100644 
boilerplate/lyft/golang_dockerfile/Dockerfile.GoTemplate create mode 100644 boilerplate/lyft/golang_dockerfile/Readme.rst create mode 100755 boilerplate/lyft/golang_dockerfile/update.sh create mode 100644 boilerplate/lyft/golang_test_targets/Makefile create mode 100644 boilerplate/lyft/golang_test_targets/Readme.rst create mode 100755 boilerplate/lyft/golang_test_targets/goimports create mode 100644 boilerplate/update.cfg create mode 100755 boilerplate/update.sh create mode 100644 cmd/entrypoints/clusterresource.go create mode 100644 cmd/entrypoints/migrate.go create mode 100644 cmd/entrypoints/root.go create mode 100644 cmd/entrypoints/serve.go create mode 100644 cmd/main.go create mode 100644 flyteadmin_config.yaml create mode 100644 pkg/async/notifications/email.go create mode 100644 pkg/async/notifications/email_test.go create mode 100644 pkg/async/notifications/factory.go create mode 100644 pkg/async/notifications/implementations/aws_emailer.go create mode 100644 pkg/async/notifications/implementations/aws_emailer_test.go create mode 100644 pkg/async/notifications/implementations/noop_notifications.go create mode 100644 pkg/async/notifications/implementations/processor.go create mode 100644 pkg/async/notifications/implementations/processor_test.go create mode 100644 pkg/async/notifications/implementations/publisher.go create mode 100644 pkg/async/notifications/implementations/publisher_test.go create mode 100644 pkg/async/notifications/interfaces/emailer.go create mode 100644 pkg/async/notifications/interfaces/processor.go create mode 100644 pkg/async/notifications/interfaces/publisher.go create mode 100644 pkg/async/notifications/mocks/emailer.go create mode 100644 pkg/async/notifications/mocks/processor.go create mode 100644 pkg/async/notifications/mocks/publisher.go create mode 100644 pkg/async/schedule/aws/cloud_watch_scheduler.go create mode 100644 pkg/async/schedule/aws/cloud_watch_scheduler_test.go create mode 100644 
pkg/async/schedule/aws/interfaces/cloud_watch_event_client.go create mode 100644 pkg/async/schedule/aws/mocks/mock_cloud_watch_event_client.go create mode 100644 pkg/async/schedule/aws/serialization.go create mode 100644 pkg/async/schedule/aws/serialization_test.go create mode 100644 pkg/async/schedule/aws/shared.go create mode 100644 pkg/async/schedule/aws/shared_test.go create mode 100644 pkg/async/schedule/aws/workflow_executor.go create mode 100644 pkg/async/schedule/aws/workflow_executor_test.go create mode 100644 pkg/async/schedule/factory.go create mode 100644 pkg/async/schedule/interfaces/event_scheduler.go create mode 100644 pkg/async/schedule/interfaces/workflow_executor.go create mode 100644 pkg/async/schedule/mocks/mock_event_scheduler.go create mode 100644 pkg/async/schedule/mocks/mock_workflow_executor.go create mode 100644 pkg/async/schedule/noop/event_scheduler.go create mode 100644 pkg/async/schedule/noop/workflow_executor.go create mode 100644 pkg/clusterresource/controller.go create mode 100644 pkg/clusterresource/controller_test.go create mode 100644 pkg/common/cloud.go create mode 100644 pkg/common/constants.go create mode 100644 pkg/common/entity.go create mode 100644 pkg/common/executions.go create mode 100644 pkg/common/executions_test.go create mode 100644 pkg/common/filters.go create mode 100644 pkg/common/filters_test.go create mode 100644 pkg/common/mocks/storage.go create mode 100644 pkg/common/sorting.go create mode 100644 pkg/common/sorting_test.go create mode 100644 pkg/config/config.go create mode 100755 pkg/config/config_flags.go create mode 100755 pkg/config/config_flags_test.go create mode 100644 pkg/data/factory.go create mode 100644 pkg/data/implementations/aws_remote_url.go create mode 100644 pkg/data/implementations/aws_remote_url_test.go create mode 100644 pkg/data/implementations/noop_remote_url.go create mode 100644 pkg/data/implementations/noop_remote_url_test.go create mode 100644 pkg/data/interfaces/remote.go create 
mode 100644 pkg/data/mocks/remote.go create mode 100644 pkg/errors/errors.go create mode 100644 pkg/errors/errors_test.go create mode 100644 pkg/flytek8s/client.go create mode 100644 pkg/manager/impl/execution_manager.go create mode 100644 pkg/manager/impl/execution_manager_test.go create mode 100644 pkg/manager/impl/executions/queues.go create mode 100644 pkg/manager/impl/executions/queues_test.go create mode 100644 pkg/manager/impl/launch_plan_manager.go create mode 100644 pkg/manager/impl/launch_plan_manager_test.go create mode 100644 pkg/manager/impl/node_execution_manager.go create mode 100644 pkg/manager/impl/node_execution_manager_test.go create mode 100644 pkg/manager/impl/project_manager.go create mode 100644 pkg/manager/impl/project_manager_test.go create mode 100644 pkg/manager/impl/shared/constants.go create mode 100644 pkg/manager/impl/shared/errors.go create mode 100644 pkg/manager/impl/task_execution_manager.go create mode 100644 pkg/manager/impl/task_execution_manager_test.go create mode 100644 pkg/manager/impl/task_manager.go create mode 100644 pkg/manager/impl/task_manager_test.go create mode 100644 pkg/manager/impl/testutils/config.go create mode 100644 pkg/manager/impl/testutils/constants.go create mode 100644 pkg/manager/impl/testutils/mock_closures.go create mode 100644 pkg/manager/impl/testutils/mock_requests.go create mode 100644 pkg/manager/impl/testutils/repository.go create mode 100644 pkg/manager/impl/util/digests.go create mode 100644 pkg/manager/impl/util/digests_test.go create mode 100644 pkg/manager/impl/util/filters.go create mode 100644 pkg/manager/impl/util/filters_test.go create mode 100644 pkg/manager/impl/util/shared.go create mode 100644 pkg/manager/impl/util/shared_test.go create mode 100644 pkg/manager/impl/util/testdata/workflow.json create mode 100644 pkg/manager/impl/validation/execution_validator.go create mode 100644 pkg/manager/impl/validation/execution_validator_test.go create mode 100644 
pkg/manager/impl/validation/launch_plan_validator.go create mode 100644 pkg/manager/impl/validation/launch_plan_validator_test.go create mode 100644 pkg/manager/impl/validation/node_execution_validator.go create mode 100644 pkg/manager/impl/validation/node_execution_validator_test.go create mode 100644 pkg/manager/impl/validation/project_validator.go create mode 100644 pkg/manager/impl/validation/project_validator_test.go create mode 100644 pkg/manager/impl/validation/task_execution_validator.go create mode 100644 pkg/manager/impl/validation/task_execution_validator_test.go create mode 100644 pkg/manager/impl/validation/task_validator.go create mode 100644 pkg/manager/impl/validation/task_validator_test.go create mode 100644 pkg/manager/impl/validation/validation.go create mode 100644 pkg/manager/impl/validation/validation_test.go create mode 100644 pkg/manager/impl/validation/workflow_validator.go create mode 100644 pkg/manager/impl/validation/workflow_validator_test.go create mode 100644 pkg/manager/impl/workflow_manager.go create mode 100644 pkg/manager/impl/workflow_manager_test.go create mode 100644 pkg/manager/interfaces/execution.go create mode 100644 pkg/manager/interfaces/launch_plan.go create mode 100644 pkg/manager/interfaces/node_execution.go create mode 100644 pkg/manager/interfaces/project.go create mode 100644 pkg/manager/interfaces/task.go create mode 100644 pkg/manager/interfaces/task_execution.go create mode 100644 pkg/manager/interfaces/workflow.go create mode 100644 pkg/manager/mocks/execution.go create mode 100644 pkg/manager/mocks/launch_plan.go create mode 100644 pkg/manager/mocks/node_execution.go create mode 100644 pkg/manager/mocks/project.go create mode 100644 pkg/manager/mocks/task.go create mode 100644 pkg/manager/mocks/task_execution.go create mode 100644 pkg/manager/mocks/workflow.go create mode 100644 pkg/repositories/config/database.go create mode 100644 pkg/repositories/config/migration_models.go create mode 100644 
pkg/repositories/config/migrations.go create mode 100644 pkg/repositories/config/postgres.go create mode 100644 pkg/repositories/config/postgres_test.go create mode 100644 pkg/repositories/config/seed_data.go create mode 100644 pkg/repositories/database_test.go create mode 100644 pkg/repositories/errors/error_transformer.go create mode 100644 pkg/repositories/errors/errors.go create mode 100644 pkg/repositories/errors/postgres.go create mode 100644 pkg/repositories/errors/postgres_test.go create mode 100644 pkg/repositories/errors/test_error_transformer.go create mode 100644 pkg/repositories/factory.go create mode 100644 pkg/repositories/gormimpl/common.go create mode 100644 pkg/repositories/gormimpl/execution_repo.go create mode 100644 pkg/repositories/gormimpl/execution_repo_test.go create mode 100644 pkg/repositories/gormimpl/launch_plan_repo.go create mode 100644 pkg/repositories/gormimpl/launch_plan_repo_test.go create mode 100644 pkg/repositories/gormimpl/metrics.go create mode 100644 pkg/repositories/gormimpl/node_execution_repo.go create mode 100644 pkg/repositories/gormimpl/node_execution_repo_test.go create mode 100644 pkg/repositories/gormimpl/project_repo.go create mode 100644 pkg/repositories/gormimpl/project_repo_test.go create mode 100644 pkg/repositories/gormimpl/task_execution_repo.go create mode 100644 pkg/repositories/gormimpl/task_execution_repo_test.go create mode 100644 pkg/repositories/gormimpl/task_repo.go create mode 100644 pkg/repositories/gormimpl/task_repo_test.go create mode 100644 pkg/repositories/gormimpl/test_utils.go create mode 100644 pkg/repositories/gormimpl/workflow_repo.go create mode 100644 pkg/repositories/gormimpl/workflow_repo_test.go create mode 100644 pkg/repositories/interfaces/common.go create mode 100644 pkg/repositories/interfaces/execution_repo.go create mode 100644 pkg/repositories/interfaces/launch_plan_repo.go create mode 100644 pkg/repositories/interfaces/node_execution_repo.go create mode 100644 
pkg/repositories/interfaces/project_repo.go create mode 100644 pkg/repositories/interfaces/task_execution_repo.go create mode 100644 pkg/repositories/interfaces/task_repo.go create mode 100644 pkg/repositories/interfaces/workflow_repo.go create mode 100644 pkg/repositories/mocks/execution_repo.go create mode 100644 pkg/repositories/mocks/launch_plan_repo.go create mode 100644 pkg/repositories/mocks/node_execution_repo.go create mode 100644 pkg/repositories/mocks/project_repo.go create mode 100644 pkg/repositories/mocks/repository.go create mode 100644 pkg/repositories/mocks/task_execution_repo.go create mode 100644 pkg/repositories/mocks/task_repo.go create mode 100644 pkg/repositories/mocks/workflow_repo.go create mode 100644 pkg/repositories/models/base_model.go create mode 100644 pkg/repositories/models/execution.go create mode 100644 pkg/repositories/models/execution_event.go create mode 100644 pkg/repositories/models/launch_plan.go create mode 100644 pkg/repositories/models/node_execution.go create mode 100644 pkg/repositories/models/node_execution_event.go create mode 100644 pkg/repositories/models/project.go create mode 100644 pkg/repositories/models/task.go create mode 100644 pkg/repositories/models/task_execution.go create mode 100644 pkg/repositories/models/workflow.go create mode 100644 pkg/repositories/postgres_repo.go create mode 100644 pkg/repositories/transformers/execution.go create mode 100644 pkg/repositories/transformers/execution_event.go create mode 100644 pkg/repositories/transformers/execution_event_test.go create mode 100644 pkg/repositories/transformers/execution_test.go create mode 100644 pkg/repositories/transformers/launch_plan.go create mode 100644 pkg/repositories/transformers/launch_plan_test.go create mode 100644 pkg/repositories/transformers/node_execution.go create mode 100644 pkg/repositories/transformers/node_execution_event.go create mode 100644 pkg/repositories/transformers/node_execution_event_test.go create mode 100644 
pkg/repositories/transformers/node_execution_test.go create mode 100644 pkg/repositories/transformers/project.go create mode 100644 pkg/repositories/transformers/project_test.go create mode 100644 pkg/repositories/transformers/task.go create mode 100644 pkg/repositories/transformers/task_execution.go create mode 100644 pkg/repositories/transformers/task_execution_test.go create mode 100644 pkg/repositories/transformers/task_test.go create mode 100644 pkg/repositories/transformers/workflow.go create mode 100644 pkg/repositories/transformers/workflow_test.go create mode 100644 pkg/rpc/adminservice/base.go create mode 100644 pkg/rpc/adminservice/execution.go create mode 100644 pkg/rpc/adminservice/launch_plan.go create mode 100644 pkg/rpc/adminservice/metrics.go create mode 100644 pkg/rpc/adminservice/node_execution.go create mode 100644 pkg/rpc/adminservice/project.go create mode 100644 pkg/rpc/adminservice/task.go create mode 100644 pkg/rpc/adminservice/task_execution.go create mode 100644 pkg/rpc/adminservice/tests/execution_test.go create mode 100644 pkg/rpc/adminservice/tests/launch_plan_test.go create mode 100644 pkg/rpc/adminservice/tests/node_execution_test.go create mode 100644 pkg/rpc/adminservice/tests/project_test.go create mode 100644 pkg/rpc/adminservice/tests/task_execution_test.go create mode 100644 pkg/rpc/adminservice/tests/task_test.go create mode 100644 pkg/rpc/adminservice/tests/util.go create mode 100644 pkg/rpc/adminservice/tests/workflow_test.go create mode 100644 pkg/rpc/adminservice/util/metrics.go create mode 100644 pkg/rpc/adminservice/util/transformers.go create mode 100644 pkg/rpc/adminservice/util/transformers_test.go create mode 100644 pkg/rpc/adminservice/workflow.go create mode 100644 pkg/runtime/application_config_provider.go create mode 100644 pkg/runtime/cluster_config_provider.go create mode 100644 pkg/runtime/cluster_resource_provider.go create mode 100644 pkg/runtime/cluster_resource_provider_test.go create mode 100644 
pkg/runtime/config_provider_test.go create mode 100644 pkg/runtime/configuration_provider.go create mode 100644 pkg/runtime/execution_queue_provider.go create mode 100644 pkg/runtime/interfaces/application_configuration.go create mode 100644 pkg/runtime/interfaces/cluster_configuration.go create mode 100644 pkg/runtime/interfaces/cluster_resource_configuration.go create mode 100644 pkg/runtime/interfaces/configuration.go create mode 100644 pkg/runtime/interfaces/queue_configuration.go create mode 100644 pkg/runtime/interfaces/registration_validation_provider.go create mode 100644 pkg/runtime/interfaces/task_resource_configuration.go create mode 100644 pkg/runtime/interfaces/whitelist.go create mode 100644 pkg/runtime/mocks/mock_application_provider.go create mode 100644 pkg/runtime/mocks/mock_cluster_resource_provider.go create mode 100644 pkg/runtime/mocks/mock_configuration_provider.go create mode 100644 pkg/runtime/mocks/mock_execution_queue_provider.go create mode 100644 pkg/runtime/mocks/mock_registration_validation_provider.go create mode 100644 pkg/runtime/mocks/mock_task_resource_provider.go create mode 100644 pkg/runtime/mocks/mock_whitelist_provider.go create mode 100644 pkg/runtime/registration_validation_provider.go create mode 100644 pkg/runtime/task_resource_provider.go create mode 100644 pkg/runtime/testdata/cluster_resource_config.yaml create mode 100644 pkg/runtime/testdata/clusters_config.yaml create mode 100644 pkg/runtime/testdata/config.yaml create mode 100644 pkg/runtime/whitelist_provider.go create mode 100644 pkg/workflowengine/impl/compiler.go create mode 100644 pkg/workflowengine/impl/interface_provider.go create mode 100644 pkg/workflowengine/impl/interface_provider_test.go create mode 100644 pkg/workflowengine/impl/propeller_executor.go create mode 100644 pkg/workflowengine/impl/propeller_executor_test.go create mode 100644 pkg/workflowengine/interfaces/compiler.go create mode 100644 pkg/workflowengine/interfaces/executor.go create mode 
100644 pkg/workflowengine/mocks/mock_compiler.go create mode 100644 pkg/workflowengine/mocks/mock_executor.go create mode 100644 sampleresourcetemplates/docker.yaml create mode 100644 sampleresourcetemplates/imagepullsecrets.yaml create mode 100644 sampleresourcetemplates/namespace.yaml create mode 100644 script/integration/k8s/integration.yaml create mode 100755 script/integration/k8s/main.sh create mode 100755 script/integration/launch.sh create mode 100644 tests/bootstrap.go create mode 100644 tests/execution_test.go create mode 100644 tests/helpers.go create mode 100644 tests/launch_plan_test.go create mode 100644 tests/node_execution_test.go create mode 100644 tests/project.go create mode 100644 tests/shared.go create mode 100644 tests/task_execution_test.go create mode 100644 tests/task_test.go create mode 100644 tests/workflow_test.go diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..140fada73 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +vendor/* diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..5ed4a2ac2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ + +.idea/ +.DS_Store +.vscode/ +.vendor-new/ + +vendor/ +node_modules/ diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 000000000..3df02b549 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,27 @@ +run: + skip-files: + # because we're skipping TLS verification - for now + - cmd/entrypoints/serve.go + - pkg/async/messages/sqs.go + +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gas + - goconst + - goimports + - golint + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - staticcheck + - structcheck + - typecheck + - unconvert + - unparam + - unused + - varcheck diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..8764b329a --- /dev/null +++ b/.travis.yml @@ -0,0 +1,27 @@ +sudo: required +language: go +go: + - "1.10" +services: + - docker +jobs: + include: + # dont push to 
dockerhub on forks + - if: fork = true + stage: test + name: build, integration test + install: true + script: BUILD_PHASE=builder make docker_build && make k8s_integration + - if: fork = false + stage: test + name: build, integration test, and push + install: true + script: BUILD_PHASE=builder make docker_build && make k8s_integration && make dockerhub_push + - stage: test + name: unit tests + install: make install + script: make test_unit + - stage: test + install: make install + name: lint + script: make lint diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..803d8a77f --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +This project is governed by [Lyft's code of +conduct](https://github.com/lyft/code-of-conduct). All contributors +and participants agree to abide by its terms. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..11fde287e --- /dev/null +++ b/Dockerfile @@ -0,0 +1,33 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +# Using go1.10.4 +FROM golang:1.10.4-alpine3.8 as builder +RUN apk add git openssh-client make curl dep + +# COPY only the dep files for efficient caching +COPY Gopkg.* /go/src/github.com/lyft/flyteadmin/ +WORKDIR /go/src/github.com/lyft/flyteadmin + +# Pull dependencies +RUN dep ensure -vendor-only + +# COPY the rest of the source code +COPY . 
/go/src/github.com/lyft/flyteadmin/ + +# This 'linux_compile' target should compile binaries to the /artifacts directory +# The main entrypoint should be compiled to /artifacts/flyteadmin +RUN make linux_compile + +# update the PATH to include the /artifacts directory +ENV PATH="/artifacts:${PATH}" + +# This will eventually move to centurylink/ca-certs:latest for minimum possible image size +FROM alpine:3.8 +COPY --from=builder /artifacts /bin + +RUN apk --update add ca-certificates + +CMD ["flyteadmin"] diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 000000000..864d54160 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,1191 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:80004fcc5cf64e591486b3e11b406f1e0d17bf85d475d64203c8494f5da4fcd1" + name = "cloud.google.com/go" + packages = ["compute/metadata"] + pruneopts = "UT" + revision = "ceeb313ad77b789a7fa5287b36a1d127b69b7093" + version = "v0.44.3" + +[[projects]] + digest = "1:94d4ae958b3d2ab476bef4bed53c1dcc3cb0fb2639bd45dd08b40e57139192e5" + name = "github.com/Azure/azure-sdk-for-go" + packages = ["storage"] + pruneopts = "UT" + revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d" + version = "v10.2.1-beta" + +[[projects]] + digest = "1:0aa68ac7d88c06b85442e07b9e4d56cb5e332df2360fa2a5441b2edc5f1ae32b" + name = "github.com/Azure/go-autorest" + packages = [ + "autorest", + "autorest/adal", + "autorest/azure", + "autorest/date", + "logger", + "tracing", + ] + pruneopts = "UT" + revision = "5e7a399d8bbf4953ab0c8e3167d7fd535fd74ce1" + version = "v13.0.0" + +[[projects]] + digest = "1:4d8aa8bc01f60d0fd7f764e1838f26dbc5a5dec428217f936726007cdf3929f0" + name = "github.com/NYTimes/gizmo" + packages = [ + "config/aws", + "pubsub", + "pubsub/aws", + "pubsub/pubsubtest", + ] + pruneopts = "UT" + revision = "27bac814561a097fe9af4585fcefe223315973b2" + version = "v0.4.3" + +[[projects]] + digest = 
"1:7e704bce17074e862cfe9e4c2849320c2628fc3501b7d0795c589a427ef2bf50" + name = "github.com/Selvatico/go-mocket" + packages = ["."] + pruneopts = "UT" + revision = "c368d4162be502eea110ae12fb85e98567b0f1e6" + version = "v1.0.7" + +[[projects]] + digest = "1:313b743d54588010f7c6f5e00bbfe00ad0a2d63a075cb7d71ea85eaf8f91efa7" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/processcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/ini", + "internal/s3err", + "internal/sdkio", + "internal/sdkmath", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/eventstream", + "private/protocol/eventstream/eventstreamapi", + "private/protocol/json/jsonutil", + "private/protocol/jsonrpc", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/restxml", + "private/protocol/xml/xmlutil", + "service/cloudwatchevents", + "service/elasticache", + "service/s3", + "service/s3/s3iface", + "service/s3/s3manager", + "service/ses", + "service/ses/sesiface", + "service/sns", + "service/sns/snsiface", + "service/sqs", + "service/sqs/sqsiface", + "service/sts", + "service/sts/stsiface", + ] + pruneopts = "UT" + revision = "d57c8d96f72d9475194ccf18d2ba70ac294b0cb3" + version = "v1.23.13" + +[[projects]] + branch = "master" + digest = "1:0ad5484a25fbd88409bae8b8b19134135fe73d3cb00e45d3255280b2ab975fcc" + name = "github.com/benbjohnson/clock" + packages = ["."] + pruneopts = "UT" + revision = "7dc76406b6d3c05b5f71a86293cbcf3c4ea03b19" + +[[projects]] + branch = "master" + digest = "1:a6609679ca468a89b711934f16b346e99f6ec344eadd2f7b00b1156785dd1236" + 
name = "github.com/benlaurie/objecthash" + packages = ["go/objecthash"] + pruneopts = "UT" + revision = "d1e3d6079fc16f8f542183fb5b2fdc11d9f00866" + +[[projects]] + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" + name = "github.com/beorn7/perks" + packages = ["quantile"] + pruneopts = "UT" + revision = "37c8de3658fcb183f997c4e13e8337516ab753e6" + version = "v1.0.1" + +[[projects]] + branch = "master" + digest = "1:f98385a9b77f6cacae716a59c04e6ac374d101466d4369c4e8cc706a39c4bb2e" + name = "github.com/bradfitz/gomemcache" + packages = ["memcache"] + pruneopts = "UT" + revision = "551aad21a6682b95329c1f5bd62ee5060d64f7e8" + +[[projects]] + digest = "1:998cf998358a303ac2430c386ba3fd3398477d6013153d3c6e11432765cc9ae6" + name = "github.com/cespare/xxhash" + packages = ["."] + pruneopts = "UT" + revision = "3b82fb7d186719faeedd0c2864f868c74fbf79a1" + version = "v2.0.0" + +[[projects]] + digest = "1:00eb5d8bd96289512920ac43367d5bee76bbca2062da34862a98b26b92741896" + name = "github.com/coocood/freecache" + packages = ["."] + pruneopts = "UT" + revision = "3c79a0a23c1940ab4479332fb3e0127265650ce3" + version = "v1.1.0" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:865079840386857c809b72ce300be7580cb50d3d3129ce11bf9aa6ca2bc1934a" + name = "github.com/fatih/color" + packages = ["."] + pruneopts = "UT" + revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4" + version = "v1.7.0" + +[[projects]] + branch = "master" + digest = 
"1:78a5b63751bd99054bee07a498f6aa54da0a909922f9365d1aa3339091efa70a" + name = "github.com/fsnotify/fsnotify" + packages = ["."] + pruneopts = "UT" + revision = "1485a34d5d5723fea214f5710708e19a831720e4" + +[[projects]] + digest = "1:4d02824a56d268f74a6b6fdd944b20b58a77c3d70e81008b3ee0c4f1a6777340" + name = "github.com/gogo/protobuf" + packages = [ + "proto", + "sortkeys", + ] + pruneopts = "UT" + revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c" + version = "v1.2.1" + +[[projects]] + branch = "master" + digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "UT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + digest = "1:b532ee3f683c057e797694b5bfeb3827d89e6adf41c53dbc80e549bca76364ea" + name = "github.com/golang/protobuf" + packages = [ + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "protoc-gen-go/generator", + "protoc-gen-go/generator/internal/remap", + "protoc-gen-go/plugin", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:0bfbe13936953a98ae3cfe8ed6670d396ad81edf069a806d2f6515d7bb6950df" + name = "github.com/google/btree" + packages = ["."] + pruneopts = "UT" + revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" + version = "v1.0.0" + +[[projects]] + digest = "1:a6181aca1fd5e27103f9a920876f29ac72854df7345a39f3b01e61c8c94cc8af" + name = "github.com/google/gofuzz" + packages = ["."] + pruneopts = "UT" + revision = "f140a6486e521aad38f5917de355cbf147cc0496" + version = "v1.0.0" + +[[projects]] + digest = "1:766102087520f9d54f2acc72bd6637045900ac735b4a419b128d216f0c5c4876" + name = "github.com/googleapis/gax-go" + packages = ["v2"] + pruneopts = "UT" + revision = "bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2" + version = "v2.0.5" + +[[projects]] + 
digest = "1:ca4524b4855ded427c7003ec903a5c854f37e7b1e8e2a93277243462c5b753a8" + name = "github.com/googleapis/gnostic" + packages = [ + "OpenAPIv2", + "compiler", + "extensions", + ] + pruneopts = "UT" + revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab" + version = "v0.3.1" + +[[projects]] + digest = "1:16e1cbd76f0d4152b5573f08f38b451748f74ec59b99a004a7481342b3fc05af" + name = "github.com/graymeta/stow" + packages = [ + ".", + "azure", + "google", + "local", + "oracle", + "s3", + "swift", + ] + pruneopts = "UT" + revision = "903027f87de7054953efcdb8ba70d5dc02df38c7" + +[[projects]] + branch = "master" + digest = "1:5fc0e23b254a1bd7d8d2d42fa093ba33471d08f52fe04afd3713adabb5888dc3" + name = "github.com/gregjones/httpcache" + packages = [ + ".", + "diskcache", + ] + pruneopts = "UT" + revision = "901d90724c7919163f472a9812253fb26761123d" + +[[projects]] + digest = "1:73513cdd52d6f0768201cebbf82612aa39a9d8022bc6337815cd504e532281b7" + name = "github.com/grpc-ecosystem/go-grpc-middleware" + packages = [ + ".", + "retry", + "util/backoffutils", + "util/metautils", + ] + pruneopts = "UT" + revision = "c250d6563d4d4c20252cd865923440e829844f4e" + version = "v1.0.0" + +[[projects]] + digest = "1:9b7a07ac7577787a8ecc1334cb9f34df1c76ed82a917d556c5713d3ab84fbc43" + name = "github.com/grpc-ecosystem/go-grpc-prometheus" + packages = ["."] + pruneopts = "UT" + revision = "c225b8c3b01faf2899099b768856a9e916e5087b" + version = "v1.2.0" + +[[projects]] + digest = "1:9da9ffdf93e29e054fb3b066e3c258e8ed090f6bec4bba1e86aeb9b1ba0056a9" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "protoc-gen-swagger/options", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "a9bbe40ed238db18f710b0e3d2970348c8fcec41" + version = "v1.10.0" + +[[projects]] + digest = "1:7fae9ec96d10b2afce0da23c378c8b3389319b7f92fa092f2621bba3078cfb4b" + name = "github.com/hashicorp/golang-lru" + packages = ["simplelru"] + pruneopts = "UT" + revision = 
"7f827b33c0f158ec5dfbba01bb0b14a4541fd81d" + version = "v0.5.3" + +[[projects]] + digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/printer", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "UT" + revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241" + version = "v1.0.0" + +[[projects]] + digest = "1:a0cefd27d12712af4b5018dc7046f245e1e3b5760e2e848c30b171b570708f9b" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "UT" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "UT" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = "1:da6718abe4d47b1132d98bf3f9b18e302d537bf6daf02bd40804d9295a3f32bd" + name = "github.com/jinzhu/gorm" + packages = [ + ".", + "dialects/postgres", + ] + pruneopts = "UT" + revision = "836fb2c19d84dac7b0272958dfb9af7cf0d0ade4" + version = "v1.9.10" + +[[projects]] + digest = "1:01ed62f8f4f574d8aff1d88caee113700a2b44c42351943fa73cc1808f736a50" + name = "github.com/jinzhu/inflection" + packages = ["."] + pruneopts = "UT" + revision = "f5c5f50e6090ae76a29240b61ae2a90dd810112e" + version = "v1.0.0" + +[[projects]] + digest = "1:bb81097a5b62634f3e9fec1014657855610c82d19b9a40c17612e32651e35dca" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "UT" + revision = "c2b33e84" + +[[projects]] + digest = "1:709cd2a2c29cc9b89732f6c24846bbb9d6270f28ef5ef2128cc73bd0d6d7bff9" + name = "github.com/json-iterator/go" + packages = ["."] + pruneopts = "UT" + revision = "27518f6661eba504be5a7a9a9f6d9460d892ade3" + version = "v1.1.7" + +[[projects]] + digest = 
"1:fd9bea48bbc5bba66d9891c72af7255fbebecdff845c37c679406174ece5ca1b" + name = "github.com/kelseyhightower/envconfig" + packages = ["."] + pruneopts = "UT" + revision = "0b417c4ec4a8a82eecc22a1459a504aa55163d61" + version = "v1.4.0" + +[[projects]] + digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" + +[[projects]] + digest = "1:0ead8e64fe356bd9221605e3ec40b4438509868018cbbbaaaff3ebae1b69b78b" + name = "github.com/lib/pq" + packages = [ + ".", + "hstore", + "oid", + "scram", + ] + pruneopts = "UT" + revision = "3427c32cb71afc948325f299f040e53c1dd78979" + version = "v1.2.0" + +[[projects]] + digest = "1:4c02e347457c97ee8cfafb413554854fe236d715879ac0a43743017cd179de2e" + name = "github.com/lyft/flyteidl" + packages = [ + "clients/go/admin", + "clients/go/admin/mocks", + "clients/go/events", + "clients/go/events/errors", + "gen/pb-go/flyteidl/admin", + "gen/pb-go/flyteidl/core", + "gen/pb-go/flyteidl/event", + "gen/pb-go/flyteidl/service", + ] + pruneopts = "UT" + revision = "c92b79f5f448ec36420eb79bbebb2b372261b77f" + source = "https://github.com/lyft/flyteidl" + version = "v0.1.1" + +[[projects]] + digest = "1:09785a77f804b9b5524cfec6d6240ea0ce53251a38eb55abeb616bcfdd85de99" + name = "github.com/lyft/flyteplugins" + packages = ["go/tasks/v1/types"] + pruneopts = "UT" + revision = "9156da396c7af5b34b4411c3ec99470864425b18" + source = "https://github.com/lyft/flyteplugins" + version = "v0.1.1" + +[[projects]] + digest = "1:3dfb37d4f608c21e5f1d14de40b82d919b76c5044cc6daf38f94a98162e899c7" + name = "github.com/lyft/flytepropeller" + packages = [ + "pkg/apis/flyteworkflow", + "pkg/apis/flyteworkflow/v1alpha1", + "pkg/client/clientset/versioned", + "pkg/client/clientset/versioned/scheme", + "pkg/client/clientset/versioned/typed/flyteworkflow/v1alpha1", + "pkg/compiler", + 
"pkg/compiler/common", + "pkg/compiler/errors", + "pkg/compiler/transformers/k8s", + "pkg/compiler/typing", + "pkg/compiler/validators", + "pkg/utils", + ] + pruneopts = "UT" + revision = "40db32eaa4dc75293560e50c51c2120c9c41d4bb" + source = "https://github.com/lyft/flytepropeller" + version = "v0.1.0" + +[[projects]] + digest = "1:3218b76036eebb079cc456504891ab7b5edace6bc8ce8473b507a5cfd7a6f81e" + name = "github.com/lyft/flytestdlib" + packages = [ + "atomic", + "config", + "config/files", + "config/viper", + "contextutils", + "errors", + "ioutils", + "logger", + "pbhash", + "profutils", + "promutils", + "promutils/labeled", + "storage", + "version", + ] + pruneopts = "UT" + revision = "7292f20ec17b42f104fd61d7f0120e17bcacf751" + source = "https://github.com/lyft/flytestdlib" + version = "v0.2.16" + +[[projects]] + digest = "1:2a0da3440db3f2892609d99cd0389c2776a3fef24435f7b7b58bfc9030aa86ca" + name = "github.com/magiconair/properties" + packages = [ + ".", + "assert", + ] + pruneopts = "UT" + revision = "de8848e004dd33dc07a2947b3d76f618a7fc7ef1" + version = "v1.8.1" + +[[projects]] + digest = "1:c658e84ad3916da105a761660dcaeb01e63416c8ec7bc62256a9b411a05fcd67" + name = "github.com/mattn/go-colorable" + packages = ["."] + pruneopts = "UT" + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + digest = "1:36325ebb862e0382f2f14feef409ba9351271b89ada286ae56836c603d43b59c" + name = "github.com/mattn/go-isatty" + packages = ["."] + pruneopts = "UT" + revision = "e1f7b56ace729e4a73a29a6b4fac6cd5fcda7ab3" + version = "v0.0.9" + +[[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" + +[[projects]] + digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + name = 
"github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "UT" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" + version = "v1.1.2" + +[[projects]] + digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563" + name = "github.com/modern-go/concurrent" + packages = ["."] + pruneopts = "UT" + revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" + version = "1.0.3" + +[[projects]] + digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855" + name = "github.com/modern-go/reflect2" + packages = ["."] + pruneopts = "UT" + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[[projects]] + branch = "master" + digest = "1:2339820c575323b56a7f94146a2549fd344c51c637fa5b8bafae9695ffa6e1a5" + name = "github.com/ncw/swift" + packages = ["."] + pruneopts = "UT" + revision = "a24ef33bc9b7e59ae4bed9e87a51d7bc76122731" + +[[projects]] + digest = "1:93131d8002d7025da13582877c32d1fc302486775a1b06f62241741006428c5e" + name = "github.com/pelletier/go-toml" + packages = ["."] + pruneopts = "UT" + revision = "728039f679cbcd4f6a54e080d2219a4c4928c546" + version = "v1.4.0" + +[[projects]] + branch = "master" + digest = "1:89da0f0574bc94cfd0ac8b59af67bf76cdd110d503df2721006b9f0492394333" + name = "github.com/petar/GoLLRB" + packages = ["llrb"] + pruneopts = "UT" + revision = "33fb24c13b99c46c93183c291836c573ac382536" + +[[projects]] + digest = "1:a8c2725121694dfbf6d552fb86fe6b46e3e7135ea05db580c28695b916162aad" + name = "github.com/peterbourgon/diskv" + packages = ["."] + pruneopts = "UT" + revision = "0be1b92a6df0e4f5cb0a5d15fb7f643d0ad93ce6" + version = "v3.0.0" + +[[projects]] + digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4" + version = "v0.8.1" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = 
"github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:e89f2cdede55684adbe44b5566f55838ad2aee1dff348d14b73ccf733607b671" + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/internal", + "prometheus/promhttp", + ] + pruneopts = "UT" + revision = "2641b987480bca71fb39738eb8c8b0d577cb1d76" + version = "v0.9.4" + +[[projects]] + branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" + name = "github.com/prometheus/client_model" + packages = ["go"] + pruneopts = "UT" + revision = "14fe0d1b01d4d5fc031dd4bec1823bd3ebbe8016" + +[[projects]] + digest = "1:8dcedf2e8f06c7f94e48267dea0bc0be261fa97b377f3ae3e87843a92a549481" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model", + ] + pruneopts = "UT" + revision = "31bed53e4047fd6c510e43a941f90cb31be0972a" + version = "v0.6.0" + +[[projects]] + digest = "1:8232537905152d6a0b116b9af5a0868fcac0e84eb02ec5a150624c077bdedb0b" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/fs", + "internal/util", + ] + pruneopts = "UT" + revision = "00ec24a6a2d86e7074629c8384715dbb05adccd8" + version = "v0.0.4" + +[[projects]] + digest = "1:274f67cb6fed9588ea2521ecdac05a6d62a8c51c074c1fccc6a49a40ba80e925" + name = "github.com/satori/uuid" + packages = ["."] + pruneopts = "UT" + revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" + version = "v1.2.0" + +[[projects]] + digest = "1:04457f9f6f3ffc5fea48e71d62f2ca256637dee0a04d710288e27e05c8b41976" + name = "github.com/sirupsen/logrus" + packages = ["."] + pruneopts = "UT" + revision = "839c75faf7f98a33d445d181f3018b5c3409a45e" + version = "v1.4.2" + +[[projects]] + digest = "1:bb495ec276ab82d3dd08504bbc0594a65de8c3b22c6f2aaa92d05b73fbf3a82e" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + 
] + pruneopts = "UT" + revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424" + version = "v1.2.2" + +[[projects]] + digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc" + name = "github.com/spf13/cast" + packages = ["."] + pruneopts = "UT" + revision = "8c9545af88b134710ab1cd196795e7f2388358d7" + version = "v1.3.0" + +[[projects]] + digest = "1:e096613fb7cf34743d49af87d197663cfccd61876e2219853005a57baedfa562" + name = "github.com/spf13/cobra" + packages = ["."] + pruneopts = "UT" + revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5" + version = "v0.0.5" + +[[projects]] + digest = "1:1b753ec16506f5864d26a28b43703c58831255059644351bbcb019b843950900" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + pruneopts = "UT" + revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1" + version = "v1.1.0" + +[[projects]] + digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "UT" + revision = "298182f68c66c05229eb03ac171abe6e309ee79a" + version = "v1.0.3" + +[[projects]] + digest = "1:2532daa308722c7b65f4566e634dac2ddfaa0a398a17d8418e96ef2af3939e37" + name = "github.com/spf13/viper" + packages = ["."] + pruneopts = "UT" + revision = "ae103d7e593e371c69e832d5eb3347e2b80cbbc9" + +[[projects]] + digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02" + name = "github.com/stretchr/objx" + packages = ["."] + pruneopts = "UT" + revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" + version = "v0.1.1" + +[[projects]] + digest = "1:ad527ce5c6b2426790449db7663fe53f8bb647f9387295406794c8be001238da" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "mock", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:74055050ea547bb04600be79cc501965cb3de8988018262f2ca430f0a0b48ec3" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + 
"internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "9c377598961b706d1542bd2d84d538b5094d596e" + version = "v0.22.0" + +[[projects]] + branch = "master" + digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4" + name = "golang.org/x/crypto" + packages = ["ssh/terminal"] + pruneopts = "UT" + revision = "9756ffdc24725223350eb3266ffb92590d28f278" + +[[projects]] + branch = "master" + digest = "1:e93fe09ca93cf16f8b2dc48053f56c2f91ed4f3fd16bfaf9596b6548c7b48a7f" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "ba9fcec4b297b415637633c5a6e8fa592e4a16c3" + +[[projects]] + branch = "master" + digest = "1:31e33f76456ccf54819ab4a646cf01271d1a99d7712ab84bf1a9e7b61cd2031b" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt", + ] + pruneopts = "UT" + revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33" + +[[projects]] + branch = "master" + digest = "1:db4d094dcdda93745779828d4f7536085eae66f9ebcba842bda762883db08800" + name = "golang.org/x/sys" + packages = [ + "unix", + "windows", + ] + pruneopts = "UT" + revision = "1e83adbbebd0f5dc971915fd7e5db032c3d2b731" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] 
+ pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" + name = "golang.org/x/time" + packages = ["rate"] + pruneopts = "UT" + revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" + +[[projects]] + branch = "master" + digest = "1:218feb07b42ba85b991b6f2decbc81e7fa6bec9d59cb0c617be40c65dd5edf22" + name = "google.golang.org/api" + packages = [ + "gensupport", + "googleapi", + "googleapi/internal/uritemplates", + "googleapi/transport", + "internal", + "option", + "storage/v1", + "transport/http", + "transport/http/internal/propagation", + ] + pruneopts = "UT" + revision = "d1c9f49851b5339dea6bf7e4076b60a66e62be1f" + +[[projects]] + digest = "1:498b722d33dde4471e7d6e5d88a5e7132d2a8306fea5ff5ee82d1f418b4f41ed" + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch", + ] + pruneopts = "UT" + revision = "5f2a59506353b8d5ba8cbbcd9f3c1f41f1eaf079" + version = "v1.6.2" + +[[projects]] + branch = "master" + digest = "1:1233ed1b527b0ff66c3df5879f7e80b1d8631e030cc45821b77fc25acd0d72a6" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/annotations", + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "24fa4b261c55da65468f2abfdae2b024eef27dfb" + +[[projects]] + digest = "1:3b97661db2e5d4c87f7345e875ea28f911e54c715ba0a74be08e1649d67e05cd" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + 
"internal/balancerload", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb" + version = "v1.23.0" + +[[projects]] + digest = "1:1048ae210f190cd7b6aea19a92a055bd6112b025dd49f560579dfdfd76c8c42e" + name = "gopkg.in/gormigrate.v1" + packages = ["."] + pruneopts = "UT" + revision = "ff46dd7d2c0b00a58540e19ca3d3f5e370fa3607" + version = "v1.6.0" + +[[projects]] + digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" + name = "gopkg.in/inf.v0" + packages = ["."] + pruneopts = "UT" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" + +[[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[[projects]] + branch = "release-1.13" + digest = "1:86b38004415341a2f678a19f9312213bc851a3620bb42b3dca005c8ad0d3485c" + name = "k8s.io/api" + packages = [ + "admissionregistration/v1alpha1", + "admissionregistration/v1beta1", + "apps/v1", + "apps/v1beta1", + "apps/v1beta2", + "auditregistration/v1alpha1", + "authentication/v1", + "authentication/v1beta1", + "authorization/v1", + "authorization/v1beta1", + "autoscaling/v1", + "autoscaling/v2beta1", + "autoscaling/v2beta2", + "batch/v1", + "batch/v1beta1", + "batch/v2alpha1", + "certificates/v1beta1", + "coordination/v1beta1", + "core/v1", + "events/v1beta1", + "extensions/v1beta1", + "networking/v1", + "policy/v1beta1", + "rbac/v1", + "rbac/v1alpha1", + "rbac/v1beta1", + "scheduling/v1alpha1", + "scheduling/v1beta1", + "settings/v1alpha1", + "storage/v1", + 
"storage/v1alpha1", + "storage/v1beta1", + ] + pruneopts = "UT" + revision = "ebce17126a01f5fe02364d88c899816bcc2a8165" + +[[projects]] + digest = "1:97be1d171d2125d42ddc05182cb53f0c22bff4d6eb20e6c56709e4173242423f" + name = "k8s.io/apimachinery" + packages = [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/api/resource", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1beta1", + "pkg/conversion", + "pkg/conversion/queryparams", + "pkg/fields", + "pkg/labels", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/clock", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/intstr", + "pkg/util/json", + "pkg/util/naming", + "pkg/util/net", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/reflect", + ] + pruneopts = "UT" + revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" + version = "kubernetes-1.13.1" + +[[projects]] + digest = "1:a7b135b3eb8e33e02745491c89c990daf1c22f096fc168e295c868d2ad617c0c" + name = "k8s.io/client-go" + packages = [ + "discovery", + "dynamic", + "kubernetes/scheme", + "pkg/apis/clientauthentication", + "pkg/apis/clientauthentication/v1alpha1", + "pkg/apis/clientauthentication/v1beta1", + "pkg/version", + "plugin/pkg/client/auth/exec", + "rest", + "rest/watch", + "restmapper", + "tools/auth", + "tools/clientcmd", + "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", + "tools/metrics", + "transport", + "util/cert", + "util/connrotation", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/workqueue", + ] + pruneopts = "UT" + revision = 
"8d9ed539ba3134352c586810e749e58df4e94e4f" + version = "kubernetes-1.13.1" + +[[projects]] + digest = "1:ccb9be4c583b6ec848eb98aa395a4e8c8f8ad9ebb823642c0dd1c1c45939a5bb" + name = "k8s.io/klog" + packages = ["."] + pruneopts = "UT" + revision = "3ca30a56d8a775276f9cdae009ba326fdc05af7f" + version = "v0.4.0" + +[[projects]] + digest = "1:8f87a12b4d6f63f7787a8b5ca06348741321048d08023f5fda3ddf63e3ca2e6a" + name = "sigs.k8s.io/controller-runtime" + packages = [ + "pkg/client", + "pkg/client/apiutil", + ] + pruneopts = "UT" + revision = "477bf4f046c31c351b46fa00262bc814ac0bbca1" + version = "v0.1.11" + +[[projects]] + digest = "1:7719608fe0b52a4ece56c2dde37bedd95b938677d1ab0f84b8a7852e4c59f849" + name = "sigs.k8s.io/yaml" + packages = ["."] + pruneopts = "UT" + revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" + version = "v1.1.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/NYTimes/gizmo/pubsub", + "github.com/NYTimes/gizmo/pubsub/aws", + "github.com/NYTimes/gizmo/pubsub/pubsubtest", + "github.com/Selvatico/go-mocket", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/awserr", + "github.com/aws/aws-sdk-go/aws/request", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/service/cloudwatchevents", + "github.com/aws/aws-sdk-go/service/s3", + "github.com/aws/aws-sdk-go/service/ses", + "github.com/aws/aws-sdk-go/service/ses/sesiface", + "github.com/benbjohnson/clock", + "github.com/gogo/protobuf/proto", + "github.com/golang/glog", + "github.com/golang/protobuf/jsonpb", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/ptypes", + "github.com/golang/protobuf/ptypes/duration", + "github.com/golang/protobuf/ptypes/struct", + "github.com/golang/protobuf/ptypes/timestamp", + "github.com/grpc-ecosystem/go-grpc-prometheus", + "github.com/grpc-ecosystem/grpc-gateway/runtime", + "github.com/jinzhu/gorm", + "github.com/jinzhu/gorm/dialects/postgres", + "github.com/lib/pq", 
+ "github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin", + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/core", + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/event", + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/service", + "github.com/lyft/flytepropeller/pkg/apis/flyteworkflow/v1alpha1", + "github.com/lyft/flytepropeller/pkg/client/clientset/versioned", + "github.com/lyft/flytepropeller/pkg/client/clientset/versioned/typed/flyteworkflow/v1alpha1", + "github.com/lyft/flytepropeller/pkg/compiler", + "github.com/lyft/flytepropeller/pkg/compiler/common", + "github.com/lyft/flytepropeller/pkg/compiler/transformers/k8s", + "github.com/lyft/flytepropeller/pkg/compiler/validators", + "github.com/lyft/flytepropeller/pkg/utils", + "github.com/lyft/flytestdlib/config", + "github.com/lyft/flytestdlib/config/viper", + "github.com/lyft/flytestdlib/contextutils", + "github.com/lyft/flytestdlib/logger", + "github.com/lyft/flytestdlib/pbhash", + "github.com/lyft/flytestdlib/profutils", + "github.com/lyft/flytestdlib/promutils", + "github.com/lyft/flytestdlib/promutils/labeled", + "github.com/lyft/flytestdlib/storage", + "github.com/magiconair/properties/assert", + "github.com/mitchellh/mapstructure", + "github.com/pkg/errors", + "github.com/prometheus/client_golang/prometheus", + "github.com/spf13/cobra", + "github.com/spf13/pflag", + "github.com/stretchr/testify/assert", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/credentials", + "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/status", + "gopkg.in/gormigrate.v1", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/util/validation", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/rest", + "k8s.io/client-go/tools/clientcmd", + "sigs.k8s.io/controller-runtime/pkg/client", + ] + 
solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 000000000..fad2d6f53 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,110 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/aws/aws-sdk-go" + version = "1.15.0" + +[[constraint]] + name = "github.com/NYTimes/gizmo" + version = "v0.4.2" + +[[constraint]] + branch = "master" + name = "github.com/golang/glog" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "1.1.0" + +[[constraint]] + name = "github.com/grpc-ecosystem/grpc-gateway" + version = "1.5.1" + +[[constraint]] + name = "github.com/grpc-ecosystem/go-grpc-prometheus" + version = "1.2.0" + +[[constraint]] + name = "github.com/lib/pq" + version = "1.0.0" + +[[override]] + name = "github.com/lyft/flyteidl" + source = "https://github.com/lyft/flyteidl" + version = "^0.1.x" + +[[constraint]] + name = "github.com/lyft/flytepropeller" + source = "https://github.com/lyft/flytepropeller" + version = "^v0.1.x" + +[[override]] + name = "github.com/lyft/flytestdlib" + source = "https://github.com/lyft/flytestdlib" + version = "^v0.2.12" + +[[constraint]] + name = "github.com/magiconair/properties" + version = "1.8.0" + +[[constraint]] + name = "github.com/spf13/cobra" + version = "0.0.3" + +[[constraint]] + name = "github.com/spf13/pflag" + version = "1.0.1" + +[[constraint]] + name = "google.golang.org/grpc" + 
version = "1.16.0" + +[[constraint]] + name = "gopkg.in/gormigrate.v1" + version = "1.2.1" + +[[constraint]] + name = "k8s.io/apimachinery" + version = "kubernetes-1.13.1" + +[[constraint]] + name = "k8s.io/client-go" + version = "kubernetes-1.13.1" + +[[override]] + branch = "master" + name = "golang.org/x/net" + +[[override]] + name = "github.com/json-iterator/go" + version = "^1.1.5" + +[prune] + go-tests = true + unused-packages = true + diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..bed437514 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Lyft, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..cf5ce51d5 --- /dev/null +++ b/Makefile @@ -0,0 +1,37 @@ +export REPOSITORY=flyteadmin +include boilerplate/lyft/docker_build/Makefile +include boilerplate/lyft/golang_test_targets/Makefile + +.PHONY: update_boilerplate +update_boilerplate: + @boilerplate/update.sh + +.PHONY: integration +integration: + GOCACHE=off go test -v -tags=integration ./tests/... + +.PHONY: k8s_integration +k8s_integration: + @script/integration/launch.sh + +.PHONY: compile +compile: + go build -o flyteadmin ./cmd/ && mv ./flyteadmin ${GOPATH}/bin + +.PHONY: linux_compile +linux_compile: + GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o /artifacts/flyteadmin ./cmd/ + +.PHONY: server +server: + go run cmd/main.go --logtostderr --application.kube-config ~/.kube/config --config flyteadmin_config.yaml serve + +.PHONY: migrate +migrate: + go run cmd/main.go --logtostderr --application.kube-config ~/.kube/config --config flyteadmin_config.yaml migrate run + +.PHONY: seed_projects +seed_projects: + go run cmd/main.go --logtostderr --application.kube-config ~/.kube/config --config flyteadmin_config.yaml migrate seed-projects project admintests flytekit + +all: compile diff --git a/NOTICE b/NOTICE new file mode 100644 index 000000000..dab3948b4 --- /dev/null +++ b/NOTICE @@ -0,0 +1,4 @@ +flyteadmin +Copyright 2019 Lyft Inc. + +This product includes software developed at Lyft Inc. 
diff --git a/README.rst b/README.rst new file mode 100644 index 000000000..2512efbba --- /dev/null +++ b/README.rst @@ -0,0 +1,8 @@ +Flyteadmin +============= + +Flyteadmin is the control plane for Flyte responsible for managing entities (task, workflows, launch plans) and +administering workflow executions. Flyteadmin implements the +`AdminService `_ which +defines a stateless REST/gRPC service for interacting with registered Flyte entities and executions. +Flyteadmin uses a relational style Metadata Store abstracted by `GORM `_ ORM library. diff --git a/boilerplate/lyft/docker_build/Makefile b/boilerplate/lyft/docker_build/Makefile new file mode 100644 index 000000000..4019dab83 --- /dev/null +++ b/boilerplate/lyft/docker_build/Makefile @@ -0,0 +1,12 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +.PHONY: docker_build +docker_build: + IMAGE_NAME=$$REPOSITORY ./boilerplate/lyft/docker_build/docker_build.sh + +.PHONY: dockerhub_push +dockerhub_push: + IMAGE_NAME=lyft/$$REPOSITORY REGISTRY=docker.io ./boilerplate/lyft/docker_build/docker_build.sh diff --git a/boilerplate/lyft/docker_build/Readme.rst b/boilerplate/lyft/docker_build/Readme.rst new file mode 100644 index 000000000..bb6af9b49 --- /dev/null +++ b/boilerplate/lyft/docker_build/Readme.rst @@ -0,0 +1,23 @@ +Docker Build and Push +~~~~~~~~~~~~~~~~~~~~~ + +Provides a ``make docker_build`` target that builds your image locally. + +Provides a ``make dockerhub_push`` target that pushes your final image to Dockerhub. + +The Dockerhub image will tagged ``:`` + +If git head has a git tag, the Dockerhub image will also be tagged ``:``. + +**To Enable:** + +Add ``lyft/docker_build`` to your ``boilerplate/update.cfg`` file. 
+ +Add ``include boilerplate/lyft/docker_build/Makefile`` in your main ``Makefile`` _after_ your REPOSITORY environment variable + +:: + + REPOSITORY= + include boilerplate/lyft/docker_build/Makefile + +(this ensures the extra Make targets get included in your main Makefile) diff --git a/boilerplate/lyft/docker_build/docker_build.sh b/boilerplate/lyft/docker_build/docker_build.sh new file mode 100755 index 000000000..f504c100c --- /dev/null +++ b/boilerplate/lyft/docker_build/docker_build.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +set -e + +echo "" +echo "------------------------------------" +echo " DOCKER BUILD" +echo "------------------------------------" +echo "" + +if [ -n "$REGISTRY" ]; then + # Do not push if there are unstaged git changes + CHANGED=$(git status --porcelain) + if [ -n "$CHANGED" ]; then + echo "Please commit git changes before pushing to a registry" + exit 1 + fi +fi + + +GIT_SHA=$(git rev-parse HEAD) + +IMAGE_TAG_SUFFIX="" +# for intermediate build phases, append -$BUILD_PHASE to all image tags +if [ -n "$BUILD_PHASE" ]; then + IMAGE_TAG_SUFFIX="-${BUILD_PHASE}" +fi + +IMAGE_TAG_WITH_SHA="${IMAGE_NAME}:${GIT_SHA}${IMAGE_TAG_SUFFIX}" + +RELEASE_SEMVER=$(git describe --tags --exact-match "$GIT_SHA" 2>/dev/null) || true +if [ -n "$RELEASE_SEMVER" ]; then + IMAGE_TAG_WITH_SEMVER="${IMAGE_NAME}:${RELEASE_SEMVER}${IMAGE_TAG_SUFFIX}" +fi + +# build the image +# passing no build phase will build the final image +docker build -t "$IMAGE_TAG_WITH_SHA" --target=${BUILD_PHASE} . +echo "${IMAGE_TAG_WITH_SHA} built locally." 
# if REGISTRY specified, push the images to the remote registry
/go/src/github.com/lyft/{{REPOSITORY}}/ + +# This 'linux_compile' target should compile binaries to the /artifacts directory +# The main entrypoint should be compiled to /artifacts/{{REPOSITORY}} +RUN make linux_compile + +# update the PATH to include the /artifacts directory +ENV PATH="/artifacts:${PATH}" + +# This will eventually move to centurylink/ca-certs:latest for minimum possible image size +FROM alpine:3.8 +COPY --from=builder /artifacts /bin + +RUN apk --update add ca-certificates + +CMD ["{{REPOSITORY}}"] diff --git a/boilerplate/lyft/golang_dockerfile/Readme.rst b/boilerplate/lyft/golang_dockerfile/Readme.rst new file mode 100644 index 000000000..f801ef98d --- /dev/null +++ b/boilerplate/lyft/golang_dockerfile/Readme.rst @@ -0,0 +1,16 @@ +Golang Dockerfile +~~~~~~~~~~~~~~~~~ + +Provides a Dockerfile that produces a small image. + +**To Enable:** + +Add ``lyft/golang_dockerfile`` to your ``boilerplate/update.cfg`` file. + +Create and configure a ``make linux_compile`` target that compiles your go binaries to the ``/artifacts`` directory :: + + .PHONY: linux_compile + linux_compile: + RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o /artifacts {{ packages }} + +All binaries compiled to ``/artifacts`` will be available at ``/bin`` in your final image. diff --git a/boilerplate/lyft/golang_dockerfile/update.sh b/boilerplate/lyft/golang_dockerfile/update.sh new file mode 100755 index 000000000..7d8466326 --- /dev/null +++ b/boilerplate/lyft/golang_dockerfile/update.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +echo " - generating Dockerfile in root directory." 
+sed -e "s/{{REPOSITORY}}/${REPOSITORY}/g" ${DIR}/Dockerfile.GoTemplate > ${DIR}/../../../Dockerfile diff --git a/boilerplate/lyft/golang_test_targets/Makefile b/boilerplate/lyft/golang_test_targets/Makefile new file mode 100644 index 000000000..6c1e527fd --- /dev/null +++ b/boilerplate/lyft/golang_test_targets/Makefile @@ -0,0 +1,38 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +DEP_SHA=1f7c19e5f52f49ffb9f956f64c010be14683468b + +.PHONY: lint +lint: #lints the package for common code smells + which golangci-lint || curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $$GOPATH/bin v1.16.0 + golangci-lint run --exclude deprecated + +# If code is failing goimports linter, this will fix. +# skips 'vendor' +.PHONY: goimports +goimports: + @boilerplate/lyft/golang_test_targets/goimports + +.PHONY: install +install: #download dependencies (including test deps) for the package + which dep || (curl "https://raw.githubusercontent.com/golang/dep/${DEP_SHA}/install.sh" | sh) + dep ensure + +.PHONY: test_unit +test_unit: + go test -cover ./... -race + +.PHONY: test_benchmark +test_benchmark: + go test -bench . ./... + +.PHONY: test_unit_cover +test_unit_cover: + go test ./... -coverprofile /tmp/cover.out -covermode=count; go tool cover -func /tmp/cover.out + +.PHONY: test_unit_visual +test_unit_visual: + go test ./... 
-coverprofile /tmp/cover.out -covermode=count; go tool cover -html=/tmp/cover.out diff --git a/boilerplate/lyft/golang_test_targets/Readme.rst b/boilerplate/lyft/golang_test_targets/Readme.rst new file mode 100644 index 000000000..acc5744f5 --- /dev/null +++ b/boilerplate/lyft/golang_test_targets/Readme.rst @@ -0,0 +1,31 @@ +Golang Test Targets +~~~~~~~~~~~~~~~~~~~ + +Provides an ``install`` make target that uses ``dep`` install golang dependencies. + +Provides a ``lint`` make target that uses golangci to lint your code. + +Provides a ``test_unit`` target for unit tests. + +Provides a ``test_unit_cover`` target for analysing coverage of unit tests, which will output the coverage of each function and total statement coverage. + +Provides a ``test_unit_visual`` target for visualizing coverage of unit tests through an interactive html code heat map. + +Provides a ``test_benchmark`` target for benchmark tests. + +**To Enable:** + +Add ``lyft/golang_test_targets`` to your ``boilerplate/update.cfg`` file. + +Make sure you're using ``dep`` for dependency management. + +Provide a ``.golangci`` configuration (the lint target requires it). + +Add ``include boilerplate/lyft/golang_test_targets/Makefile`` in your main ``Makefile`` _after_ your REPOSITORY environment variable + +:: + + REPOSITORY= + include boilerplate/lyft/golang_test_targets/Makefile + +(this ensures the extra make targets get included in your main Makefile) diff --git a/boilerplate/lyft/golang_test_targets/goimports b/boilerplate/lyft/golang_test_targets/goimports new file mode 100755 index 000000000..160525a8c --- /dev/null +++ b/boilerplate/lyft/golang_test_targets/goimports @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +goimports -w $(find . 
-type f -name '*.go' -not -path "./vendor/*" -not -path "./pkg/client/*") diff --git a/boilerplate/update.cfg b/boilerplate/update.cfg new file mode 100644 index 000000000..c454fda70 --- /dev/null +++ b/boilerplate/update.cfg @@ -0,0 +1,3 @@ +lyft/docker_build +lyft/golang_test_targets +lyft/golang_dockerfile diff --git a/boilerplate/update.sh b/boilerplate/update.sh new file mode 100755 index 000000000..bea661d9a --- /dev/null +++ b/boilerplate/update.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. +# ONLY EDIT THIS FILE FROM WITHIN THE 'LYFT/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/lyft/boilerplate/blob/master/Readme.rst + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" + +OUT="$(mktemp -d)" +git clone git@github.com:lyft/boilerplate.git "${OUT}" + +echo "Updating the update.sh script." +cp "${OUT}/boilerplate/update.sh" "${DIR}/update.sh" +echo "" + + +CONFIG_FILE="${DIR}/update.cfg" +README="https://github.com/lyft/boilerplate/blob/master/Readme.rst" + +if [ ! -f "$CONFIG_FILE" ]; then + echo "$CONFIG_FILE not found." + echo "This file is required in order to select which features to include." + echo "See $README for more details." + exit 1 +fi + +if [ -z "$REPOSITORY" ]; then + echo '$REPOSITORY is required to run this script' + echo "See $README for more details." + exit 1 +fi + +while read directory; do + echo "***********************************************************************************" + echo "$directory is configured in update.cfg." + echo "-----------------------------------------------------------------------------------" + echo "syncing files from source." 
+ dir_path="${OUT}/boilerplate/${directory}" + rm -rf "${DIR}/${directory}" + mkdir -p $(dirname "${DIR}/${directory}") + cp -r "$dir_path" "${DIR}/${directory}" + if [ -f "${DIR}/${directory}/update.sh" ]; then + echo "executing ${DIR}/${directory}/update.sh" + "${DIR}/${directory}/update.sh" + fi + echo "***********************************************************************************" + echo "" +done < "$CONFIG_FILE" + +rm -rf "${OUT}" diff --git a/cmd/entrypoints/clusterresource.go b/cmd/entrypoints/clusterresource.go new file mode 100644 index 000000000..05a1e4a8e --- /dev/null +++ b/cmd/entrypoints/clusterresource.go @@ -0,0 +1,111 @@ +package entrypoints + +import ( + "context" + + "github.com/lyft/flyteadmin/pkg/clusterresource" + + "github.com/lyft/flyteadmin/pkg/flytek8s" + + "github.com/lyft/flyteadmin/pkg/runtime" + + "github.com/lyft/flytestdlib/logger" + + _ "github.com/jinzhu/gorm/dialects/postgres" // Required to import database driver. + "github.com/lyft/flyteadmin/pkg/config" + "github.com/lyft/flyteadmin/pkg/repositories" + repositoryConfig "github.com/lyft/flyteadmin/pkg/repositories/config" + "github.com/lyft/flytestdlib/promutils" + "github.com/spf13/cobra" +) + +var parentClusterResourceCmd = &cobra.Command{ + Use: "clusterresource", + Short: "This command administers the ClusterResourceController. 
Please choose a subcommand.", +} + +func GetLocalDbConfig() repositoryConfig.DbConfig { + return repositoryConfig.DbConfig{ + Host: "localhost", + Port: 5432, + DbName: "postgres", + User: "postgres", + } +} + +var controllerRunCmd = &cobra.Command{ + Use: "run", + Short: "This command will start a cluster resource controller to periodically sync cluster resources", + Run: func(cmd *cobra.Command, args []string) { + ctx := context.Background() + configuration := runtime.NewConfigurationProvider() + scope := promutils.NewScope(configuration.ApplicationConfiguration().GetTopLevelConfig().MetricsScope).NewSubScope("clusterresource") + dbConfigValues := configuration.ApplicationConfiguration().GetDbConfig() + dbConfig := repositoryConfig.DbConfig{ + Host: dbConfigValues.Host, + Port: dbConfigValues.Port, + DbName: dbConfigValues.DbName, + User: dbConfigValues.User, + Password: dbConfigValues.Password, + ExtraOptions: dbConfigValues.ExtraOptions, + } + db := repositories.GetRepository( + repositories.POSTGRES, dbConfig, scope.NewSubScope("database")) + + cfg := config.GetConfig() + kubeClient, err := flytek8s.NewKubeClient(cfg.KubeConfig, cfg.Master, configuration.ClusterConfiguration()) + if err != nil { + scope.NewSubScope("flytekubeconfig").MustNewCounter( + "kubeconfig_get_error", + "count of errors encountered fetching and initializing kube config").Inc() + logger.Fatalf(ctx, "Failed to initialize kubeClient: %+v", err) + } + + clusterResourceController := clusterresource.NewClusterResourceController(db, kubeClient, scope) + clusterResourceController.Run() + logger.Infof(ctx, "ClusterResourceController started successfully") + }, +} + +var controllerSyncCmd = &cobra.Command{ + Use: "sync", + Short: "This command will sync cluster resources", + Run: func(cmd *cobra.Command, args []string) { + ctx := context.Background() + configuration := runtime.NewConfigurationProvider() + scope := 
promutils.NewScope(configuration.ApplicationConfiguration().GetTopLevelConfig().MetricsScope).NewSubScope("clusterresource") + dbConfigValues := configuration.ApplicationConfiguration().GetDbConfig() + dbConfig := repositoryConfig.DbConfig{ + Host: dbConfigValues.Host, + Port: dbConfigValues.Port, + DbName: dbConfigValues.DbName, + User: dbConfigValues.User, + Password: dbConfigValues.Password, + ExtraOptions: dbConfigValues.ExtraOptions, + } + db := repositories.GetRepository( + repositories.POSTGRES, dbConfig, scope.NewSubScope("database")) + + cfg := config.GetConfig() + kubeClient, err := flytek8s.NewKubeClient(cfg.KubeConfig, cfg.Master, configuration.ClusterConfiguration()) + if err != nil { + scope.NewSubScope("flytekubeconfig").MustNewCounter( + "kubeconfig_get_error", + "count of errors encountered fetching and initializing kube config").Inc() + logger.Fatalf(ctx, "Failed to initialize kubeClient: %+v", err) + } + + clusterResourceController := clusterresource.NewClusterResourceController(db, kubeClient, scope) + err = clusterResourceController.Sync(ctx) + if err != nil { + logger.Fatalf(ctx, "Failed to sync cluster resources [%+v]", err) + } + logger.Infof(ctx, "ClusterResourceController started successfully") + }, +} + +func init() { + RootCmd.AddCommand(parentClusterResourceCmd) + parentClusterResourceCmd.AddCommand(controllerRunCmd) + parentClusterResourceCmd.AddCommand(controllerSyncCmd) +} diff --git a/cmd/entrypoints/migrate.go b/cmd/entrypoints/migrate.go new file mode 100644 index 000000000..b081d5a3c --- /dev/null +++ b/cmd/entrypoints/migrate.go @@ -0,0 +1,133 @@ +package entrypoints + +import ( + "context" + + "github.com/lyft/flyteadmin/pkg/runtime" + + "github.com/lyft/flytestdlib/promutils" + + "github.com/lyft/flytestdlib/logger" + + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/postgres" // Required to import database driver. 
+ "github.com/lyft/flyteadmin/pkg/repositories/config" + "github.com/spf13/cobra" + gormigrate "gopkg.in/gormigrate.v1" +) + +var parentMigrateCmd = &cobra.Command{ + Use: "migrate", + Short: "This command controls migration behavior for the Flyte admin database. Please choose a subcommand.", +} + +var migrationsScope = promutils.NewScope("migrations") +var migrateScope = migrationsScope.NewSubScope("migrate") +var rollbackScope = promutils.NewScope("migrations").NewSubScope("rollback") + +// This runs all the migrations +var migrateCmd = &cobra.Command{ + Use: "run", + Short: "This command will run all the migrations for the database", + Run: func(cmd *cobra.Command, args []string) { + ctx := context.Background() + configuration := runtime.NewConfigurationProvider() + databaseConfig := configuration.ApplicationConfiguration().GetDbConfig() + postgresConfigProvider := config.NewPostgresConfigProvider(config.DbConfig{ + Host: databaseConfig.Host, + Port: databaseConfig.Port, + DbName: databaseConfig.DbName, + User: databaseConfig.User, + Password: databaseConfig.Password, + ExtraOptions: databaseConfig.ExtraOptions, + }, migrateScope) + db, err := gorm.Open(postgresConfigProvider.GetType(), postgresConfigProvider.GetArgs()) + if err != nil { + logger.Fatal(ctx, err) + } + defer db.Close() + db.LogMode(true) + if err = db.DB().Ping(); err != nil { + logger.Fatal(ctx, err) + } + + m := gormigrate.New(db, gormigrate.DefaultOptions, config.Migrations) + if err = m.Migrate(); err != nil { + logger.Fatalf(ctx, "Could not migrate: %v", err) + } + logger.Infof(ctx, "Migration ran successfully") + }, +} + +// Rollback the latest migration +var rollbackCmd = &cobra.Command{ + Use: "rollback", + Short: "This command will rollback one migration", + Run: func(cmd *cobra.Command, args []string) { + ctx := context.Background() + configuration := runtime.NewConfigurationProvider() + databaseConfig := configuration.ApplicationConfiguration().GetDbConfig() + postgresConfigProvider := 
config.NewPostgresConfigProvider(config.DbConfig{ + Host: databaseConfig.Host, + Port: databaseConfig.Port, + DbName: databaseConfig.DbName, + User: databaseConfig.User, + Password: databaseConfig.Password, + ExtraOptions: databaseConfig.ExtraOptions, + }, rollbackScope) + + db, err := gorm.Open(postgresConfigProvider.GetType(), postgresConfigProvider.GetArgs()) + if err != nil { + logger.Fatal(ctx, err) + } + defer db.Close() + db.LogMode(true) + if err = db.DB().Ping(); err != nil { + logger.Fatal(ctx, err) + } + + m := gormigrate.New(db, gormigrate.DefaultOptions, config.Migrations) + err = m.RollbackLast() + if err != nil { + logger.Fatalf(ctx, "Could not rollback latest migration: %v", err) + } + logger.Infof(ctx, "Rolled back one migration successfully") + }, +} + +// This seeds the database with project values +var seedProjectsCmd = &cobra.Command{ + Use: "seed-projects", + Short: "Seed projects in the database.", + Run: func(cmd *cobra.Command, args []string) { + ctx := context.Background() + configuration := runtime.NewConfigurationProvider() + databaseConfig := configuration.ApplicationConfiguration().GetDbConfig() + postgresConfigProvider := config.NewPostgresConfigProvider(config.DbConfig{ + Host: databaseConfig.Host, + Port: databaseConfig.Port, + DbName: databaseConfig.DbName, + User: databaseConfig.User, + Password: databaseConfig.Password, + ExtraOptions: databaseConfig.ExtraOptions, + }, migrateScope) + db, err := gorm.Open(postgresConfigProvider.GetType(), postgresConfigProvider.GetArgs()) + if err != nil { + logger.Fatal(ctx, err) + } + defer db.Close() + db.LogMode(true) + + if err = config.SeedProjects(db, args); err != nil { + logger.Fatalf(ctx, "Could not add projects to database with err: %v", err) + } + logger.Infof(ctx, "Successfully added projects to database") + }, +} + +func init() { + RootCmd.AddCommand(parentMigrateCmd) + parentMigrateCmd.AddCommand(migrateCmd) + parentMigrateCmd.AddCommand(rollbackCmd) + 
parentMigrateCmd.AddCommand(seedProjectsCmd) +} diff --git a/cmd/entrypoints/root.go b/cmd/entrypoints/root.go new file mode 100644 index 000000000..f2f8c6f1f --- /dev/null +++ b/cmd/entrypoints/root.go @@ -0,0 +1,83 @@ +package entrypoints + +import ( + "context" + "flag" + "fmt" + "os" + + "github.com/lyft/flytestdlib/config" + "github.com/lyft/flytestdlib/config/viper" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +var ( + cfgFile string + kubeMasterURL string + configAccessor = viper.NewAccessor(config.Options{}) +) + +// RootCmd represents the base command when called without any subcommands +var RootCmd = &cobra.Command{ + Use: "flyteadmin", + Short: "Fill in later", + Long: ` +To get started run the serve subcommand which will start a server on localhost:8088: + + flyteadmin serve + +Then you can hit it with the client: + + flyteadmin adminservice foo bar baz + +Or over HTTP 1.1 with curl: + curl -X POST http://localhost:8088/api/v1/projects' +`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return initConfig(cmd.Flags()) + }, +} + +// Execute adds all child commands to the root command sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() error { + if err := RootCmd.Execute(); err != nil { + fmt.Println(err) + return err + } + return nil +} + +func init() { + // allows `$ flyteadmin --logtostderr` to work + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + + // Add persistent flags - persistent flags persist through all sub-commands + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is ./flyteadmin_config.yaml)") + RootCmd.PersistentFlags().StringVar(&kubeMasterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. 
Only required if out-of-cluster.") + + RootCmd.AddCommand(viper.GetConfigCommand()) + + // Allow viper to read the value of the flags + configAccessor.InitializePflags(RootCmd.PersistentFlags()) + + err := flag.CommandLine.Parse([]string{}) + if err != nil { + fmt.Println(err) + os.Exit(-1) + } +} + +func initConfig(flags *pflag.FlagSet) error { + configAccessor = viper.NewAccessor(config.Options{ + SearchPaths: []string{cfgFile, ".", "/etc/flyte/config", "$GOPATH/src/github.com/lyft/flyteadmin"}, + StrictMode: false, + }) + + fmt.Println("Using config file: ", configAccessor.ConfigFilesUsed()) + + configAccessor.InitializePflags(flags) + + return configAccessor.UpdateConfig(context.TODO()) +} diff --git a/cmd/entrypoints/serve.go b/cmd/entrypoints/serve.go new file mode 100644 index 000000000..3c76d6bed --- /dev/null +++ b/cmd/entrypoints/serve.go @@ -0,0 +1,219 @@ +package entrypoints + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "net/http" + _ "net/http/pprof" // Required to serve application. 
+ "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc/credentials" + + "github.com/lyft/flyteadmin/pkg/common" + + "github.com/lyft/flytestdlib/logger" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + flyteService "github.com/lyft/flyteidl/gen/pb-go/flyteidl/service" + + "github.com/lyft/flyteadmin/pkg/config" + "github.com/lyft/flyteadmin/pkg/rpc/adminservice" + + "github.com/spf13/cobra" + + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/lyft/flytestdlib/contextutils" + "github.com/lyft/flytestdlib/promutils/labeled" + "google.golang.org/grpc" +) + +type ServingOptions struct { + Secure bool + // Optional Arguments, Should be provided to enable secure mode + CertFile string + KeyFile string +} + +var serviceOpts = ServingOptions{} + +// serveCmd represents the serve command +var serveCmd = &cobra.Command{ + Use: "serve", + Short: "Launches the Flyte admin server", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + cfg := config.GetConfig() + if serviceOpts.Secure { + return serveGatewaySecure(ctx, cfg, serviceOpts) + } + return serveGatewayInsecure(ctx, cfg) + }, +} + +func init() { + // Command information + RootCmd.AddCommand(serveCmd) + serveCmd.Flags().BoolVarP(&serviceOpts.Secure, "secure", "s", false, "Use ssl") + serveCmd.Flags().StringVarP(&serviceOpts.CertFile, "cert-file", "c", "", "Path of file that contains x509 certificate") + serveCmd.Flags().StringVarP(&serviceOpts.KeyFile, "key-file", "k", "", "Path of file that contains x509 client key") + + // Set Keys + labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey, + contextutils.ExecIDKey, contextutils.WorkflowIDKey, contextutils.NodeIDKey, contextutils.TaskIDKey, + contextutils.TaskTypeKey, common.RuntimeTypeKey, common.RuntimeVersionKey) +} + +// Creates a new gRPC Server with all the configuration +func newGRPCServer(_ context.Context, cfg *config.Config, opts 
...grpc.ServerOption) (*grpc.Server, error) { + serverOpts := []grpc.ServerOption{ + grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), + grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + } + serverOpts = append(serverOpts, opts...) + grpcServer := grpc.NewServer(serverOpts...) + grpc_prometheus.Register(grpcServer) + flyteService.RegisterAdminServiceServer(grpcServer, adminservice.NewAdminServer(cfg.KubeConfig, cfg.Master)) + return grpcServer, nil +} + +func newHTTPServer(ctx context.Context, cfg *config.Config, grpcConnectionOpts []grpc.DialOption, grpcAddress string) (*http.ServeMux, error) { + // Register the server that will serve HTTP/REST Traffic + mux := http.NewServeMux() + + // Register healthcheck + mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) { + // A very simple health check. + w.WriteHeader(http.StatusOK) + }) + + // Register OpenAPI endpoint + // This endpoint will serve the OpenAPI2 spec generated by the swagger protoc plugin, and bundled by go-bindata + mux.HandleFunc("/api/v1/openapi", func(w http.ResponseWriter, r *http.Request) { + swaggerBytes, err := flyteService.Asset("admin.swagger.json") + if err != nil { + logger.Warningf(ctx, "Err %v", err) + w.WriteHeader(http.StatusFailedDependency) + } else { + w.WriteHeader(http.StatusOK) + _, err := w.Write(swaggerBytes) + if err != nil { + logger.Errorf(ctx, "failed to write openAPI information, error: %s", err.Error()) + } + } + }) + + // Register the actual Server that will service gRPC traffic + gwmux := runtime.NewServeMux(runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{})) + err := flyteService.RegisterAdminServiceHandlerFromEndpoint(ctx, gwmux, grpcAddress, grpcConnectionOpts) + if err != nil { + return nil, errors.Wrap(err, "error registering admin service") + } + + mux.Handle("/", gwmux) + + return mux, nil +} + +func serveGatewayInsecure(ctx context.Context, cfg *config.Config) error { + 
logger.Infof(ctx, "Serving FlyteAdmin Insecure") + grpcServer, err := newGRPCServer(ctx, cfg) + if err != nil { + return errors.Wrap(err, "failed to create GRPC server") + } + + logger.Infof(ctx, "Serving GRPC Traffic on: %s", cfg.GetGrpcHostAddress()) + lis, err := net.Listen("tcp", cfg.GetGrpcHostAddress()) + if err != nil { + return errors.Wrapf(err, "failed to listen on GRPC port: %s", cfg.GetGrpcHostAddress()) + } + + go func() { + err := grpcServer.Serve(lis) + logger.Fatalf(ctx, "Failed to serve GRPC Server, Err: %v", err) + }() + + logger.Infof(ctx, "Starting HTTP/1 Gateway server on %s", cfg.GetHostAddress()) + httpServer, err := newHTTPServer(ctx, cfg, []grpc.DialOption{grpc.WithInsecure()}, cfg.GetGrpcHostAddress()) + if err != nil { + return err + } + err = http.ListenAndServe(cfg.GetHostAddress(), httpServer) + if err != nil { + return errors.Wrapf(err, "failed to Start HTTP Server") + } + + return nil +} + +// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC +// connections or otherHandler otherwise. +// See https://github.com/philips/grpc-gateway-example/blob/master/cmd/serve.go for reference +func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // This is a partial recreation of gRPC's internal checks + if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + logger.Infof(context.TODO(), "Received GRPC request, %s, %s, %s, %d, %d", r.RequestURI, r.Header, r.Method, r.ProtoMajor, r.ProtoMinor) + grpcServer.ServeHTTP(w, r) + } else { + logger.Infof(context.TODO(), "Received regular request - %s, %s, %s, %d, %d", r.RequestURI, r.Header, r.Method, r.ProtoMajor, r.ProtoMinor) + otherHandler.ServeHTTP(w, r) + } + }) +} + +func serveGatewaySecure(ctx context.Context, cfg *config.Config, opts ServingOptions) error { + // This support single cert right now? 
+ var err error + cert, err := tls.LoadX509KeyPair(opts.CertFile, opts.KeyFile) + if err != nil { + return err + } + certPool := x509.NewCertPool() + data, err := ioutil.ReadFile(opts.CertFile) + if err != nil { + return errors.Wrapf(err, "failed to read server cert file: %s", opts.CertFile) + } + if ok := certPool.AppendCertsFromPEM([]byte(data)); !ok { + return fmt.Errorf("failed to load certificate into the pool") + } + + grpcServer, err := newGRPCServer(ctx, cfg, grpc.Creds(credentials.NewClientTLSFromCert(certPool, cfg.GetHostAddress()))) + if err != nil { + return errors.Wrap(err, "failed to create GRPC server") + } + + dialCreds := credentials.NewTLS(&tls.Config{ + ServerName: cfg.GetHostAddress(), + RootCAs: certPool, + }) + httpServer, err := newHTTPServer(ctx, cfg, []grpc.DialOption{grpc.WithTransportCredentials(dialCreds)}, cfg.GetHostAddress()) + if err != nil { + return err + } + + conn, err := net.Listen("tcp", cfg.GetHostAddress()) + if err != nil { + panic(err) + } + + srv := &http.Server{ + Addr: cfg.GetHostAddress(), + Handler: grpcHandlerFunc(grpcServer, httpServer), + TLSConfig: &tls.Config{ + Certificates: []tls.Certificate{cert}, + NextProtos: []string{"h2"}, + }, + } + + err = srv.Serve(tls.NewListener(conn, srv.TLSConfig)) + + if err != nil { + return errors.Wrapf(err, "failed to Start HTTP/2 Server") + } + return nil +} diff --git a/cmd/main.go b/cmd/main.go new file mode 100644 index 000000000..a9c0708d9 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,14 @@ +package main + +import ( + "github.com/golang/glog" + "github.com/lyft/flyteadmin/cmd/entrypoints" +) + +func main() { + glog.V(2).Info("Beginning Flyte Controller") + err := entrypoints.Execute() + if err != nil { + panic(err) + } +} diff --git a/flyteadmin_config.yaml b/flyteadmin_config.yaml new file mode 100644 index 000000000..377126c61 --- /dev/null +++ b/flyteadmin_config.yaml @@ -0,0 +1,141 @@ +# This is a sample configuration file. 
+# Real configuration when running inside K8s (local or otherwise) lives in a ConfigMap +# Look in the artifacts directory in the flyte repo for what's actually run +# https://github.com/lyft/flyte/blob/b47565c9998cde32b0b5f995981e3f3c990fa7cd/artifacts/flyteadmin.yaml#L72 +application: + httpPort: 8088 + grpcPort: 8089 +flyteadmin: + someBoolean: true + someOtherBoolean: false + runScheduler: false + roleNameKey: "iam.amazonaws.com/role" + metricsScope: "flyte:" + profilerPort: 10254 + testing: + host: "http://localhost:8088" + # This last must be in order! For example, a file path would be prefixed with metadata/admin/... + metadataStoragePrefix: + - "metadata" + - "admin" +database: + port: 5432 + username: postgres + host: localhost + dbname: postgres + options: "sslmode=disable" +scheduler: + eventScheduler: + scheme: local + region: "my-region" + scheduleRole: "arn:aws:iam::abc123:role/my-iam-role" + targetName: "arn:aws:sqs:my-region:abc123:my-queue" + workflowExecutor: + scheme: local + region: "my-region" + scheduleQueueName: "won't-work-locally" + accountId: "abc123" +remoteData: + region: "my-region" + scheme: local + signedUrls: + durationMinutes: 3 +notifications: + type: local + region: "my-region" + publisher: + topicName: "foo" + processor: + queueName: "queue" + accountId: "bar" + emailer: + subject: "Notice: Execution \"{{ name }}\" has {{ phase }} in \"{{ domain }}\"." + sender: "flyte-notifications@example.com" + body: > + Execution \"{{ name }}\" has {{ phase }} in \"{{ domain }}\". View details at + + http://example.com/projects/{{ project }}/domains/{{ domain }}/executions/{{ name }}. 
{{ error }} +Logger: + show-source: true + level: 5 +storage: + type: minio + connection: + access-key: minio + auth-type: accesskey + secret-key: miniostorage + disable-ssl: true + endpoint: "http://localhost:9000" + region: my-region + cache: + max_size_mbs: 10 + target_gc_percent: 100 + container: "flyte" +queues: + executionQueues: + - primary: "gpu_primary" + dynamic: "gpu_dynamic" + attributes: + - gpu + - primary: "critical" + dynamic: "critical" + attributes: + - critical + - primary: "default" + dynamic: "default" + attributes: + - defaultclusters + - primary: "my_queue_1" + domain: "production" + workflowName: "my_workflow_1" + tags: + - critical + - primary: "my_queue_1" + domain: "production" + workflowName: "my_workflow_2" + tags: + - gpu + - primary: "my_queue_3" + domain: "production" + workflowName: "my_workflow_3" + tags: + - critical + - tags: + - default +task_resources: + defaults: + cpu: 100m + gpu: 20m + memory: 1Mi + storage: 10M + limits: + cpu: 500m + gpu: 100m + memory: 1Mi + storage: 10G +task_type_whitelist: + sparkonk8s: + - project: my_queue_1 + domain: production + - project: my_queue_2 + domain: production + qubolespark: + - project: my_queue_2 +domains: + - id: development + name: development + - id: staging + name: staging + - id: production + name: production + - id: domain + name: domain +cluster_resources: + templatePath: pkg/clusterresource/sampletemplates + templateData: + foo: + value: "bar" + foofoo: + valueFrom: + env: SHELL + refresh: 3s diff --git a/pkg/async/notifications/email.go b/pkg/async/notifications/email.go new file mode 100644 index 000000000..ed888d381 --- /dev/null +++ b/pkg/async/notifications/email.go @@ -0,0 +1,54 @@ +package notifications + +import ( + "fmt" + + "strings" + + "github.com/lyft/flyteadmin/pkg/repositories/models" + runtimeInterfaces "github.com/lyft/flyteadmin/pkg/runtime/interfaces" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin" +) + +const executionError = " The execution failed 
with error: [%s]." + +const substitutionParam = "{{ %s }}" +const project = "project" +const domain = "domain" +const name = "name" +const phase = "phase" +const errorPlaceholder = "error" +const replaceAllInstances = -1 + +func substituteEmailParameters(message string, request admin.WorkflowExecutionEventRequest, execution models.Execution) string { + response := strings.Replace(message, fmt.Sprintf(substitutionParam, project), execution.Project, replaceAllInstances) + response = strings.Replace(response, fmt.Sprintf(substitutionParam, domain), execution.Domain, replaceAllInstances) + response = strings.Replace(response, fmt.Sprintf(substitutionParam, name), execution.Name, replaceAllInstances) + response = strings.Replace(response, fmt.Sprintf(substitutionParam, phase), + strings.ToLower(request.Event.Phase.String()), replaceAllInstances) + if request.Event.GetError() != nil { + response = strings.Replace(response, fmt.Sprintf(substitutionParam, errorPlaceholder), + fmt.Sprintf(executionError, request.Event.GetError().Message), replaceAllInstances) + } else { + // Replace the optional error placeholder with an empty string. + response = strings.Replace(response, fmt.Sprintf(substitutionParam, errorPlaceholder), "", replaceAllInstances) + } + + return response +} + +// Converts a terminal execution event and existing execution model to an admin.EmailMessage proto, substituting parameters +// in customizable email fields set in the flyteadmin application notifications config. 
+func ToEmailMessageFromWorkflowExecutionEvent( + config runtimeInterfaces.NotificationsConfig, + emailNotification admin.EmailNotification, + request admin.WorkflowExecutionEventRequest, + execution models.Execution) *admin.EmailMessage { + + return &admin.EmailMessage{ + SubjectLine: substituteEmailParameters(config.NotificationsEmailerConfig.Subject, request, execution), + SenderEmail: config.NotificationsEmailerConfig.Sender, + RecipientsEmail: emailNotification.GetRecipientsEmail(), + Body: substituteEmailParameters(config.NotificationsEmailerConfig.Body, request, execution), + } +} diff --git a/pkg/async/notifications/email_test.go b/pkg/async/notifications/email_test.go new file mode 100644 index 000000000..1c301edbd --- /dev/null +++ b/pkg/async/notifications/email_test.go @@ -0,0 +1,79 @@ +package notifications + +import ( + "fmt" + "testing" + + "github.com/gogo/protobuf/proto" + "github.com/lyft/flyteadmin/pkg/repositories/models" + runtimeInterfaces "github.com/lyft/flyteadmin/pkg/runtime/interfaces" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/core" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/event" + "github.com/stretchr/testify/assert" +) + +func TestSubstituteEmailParameters(t *testing.T) { + message := "{{ unused }}. {{project }} and {{ domain }} and {{ name }} ended up in {{ phase }}.{{ error }}" + request := admin.WorkflowExecutionEventRequest{ + Event: &event.WorkflowExecutionEvent{ + Phase: core.WorkflowExecution_SUCCEEDED, + }, + } + model := models.Execution{ + ExecutionKey: models.ExecutionKey{ + Project: "proj", + Domain: "prod", + Name: "e124", + }, + } + assert.Equal(t, "{{ unused }}. {{project }} and prod and e124 ended up in succeeded.", + substituteEmailParameters(message, request, model)) + request.Event.OutputResult = &event.WorkflowExecutionEvent_Error{ + Error: &core.ExecutionError{ + Message: "uh-oh", + }, + } + assert.Equal(t, "{{ unused }}. 
{{project }} and prod and e124 ended up in succeeded. The execution failed with error: [uh-oh].", + substituteEmailParameters(message, request, model)) +} + +func TestToEmailMessageFromWorkflowExecutionEvent(t *testing.T) { + notificationsConfig := runtimeInterfaces.NotificationsConfig{ + NotificationsEmailerConfig: runtimeInterfaces.NotificationsEmailerConfig{ + Body: "Execution \"{{ name }}\" has succeeded in \"{{ domain }}\". View details at " + + "" + + "https://example.com/executions/{{ project }}/{{ domain }}/{{ name }}.", + Sender: "no-reply@example.com", + Subject: "Notice: Execution \"{{ name }}\" has succeeded in \"{{ domain }}\".", + }, + } + emailNotification := admin.EmailNotification{ + RecipientsEmail: []string{ + "a@example.com", "b@example.org", + }, + } + request := admin.WorkflowExecutionEventRequest{ + Event: &event.WorkflowExecutionEvent{ + Phase: core.WorkflowExecution_ABORTED, + }, + } + model := models.Execution{ + ExecutionKey: models.ExecutionKey{ + Project: "proj", + Domain: "prod", + Name: "e124", + }, + } + emailMessage := ToEmailMessageFromWorkflowExecutionEvent(notificationsConfig, emailNotification, request, model) + assert.True(t, proto.Equal(emailMessage, &admin.EmailMessage{ + RecipientsEmail: []string{ + "a@example.com", "b@example.org", + }, + SenderEmail: "no-reply@example.com", + SubjectLine: "Notice: Execution \"e124\" has succeeded in \"prod\".", + Body: "Execution \"e124\" has succeeded in \"prod\". 
View details at " + + "" + + "https://example.com/executions/proj/prod/e124.", + }), fmt.Sprintf("%+v", emailMessage)) +} diff --git a/pkg/async/notifications/factory.go b/pkg/async/notifications/factory.go new file mode 100644 index 000000000..72e78cf1d --- /dev/null +++ b/pkg/async/notifications/factory.go @@ -0,0 +1,111 @@ +package notifications + +import ( + "context" + + "github.com/lyft/flyteadmin/pkg/async/notifications/implementations" + "github.com/lyft/flyteadmin/pkg/async/notifications/interfaces" + runtimeInterfaces "github.com/lyft/flyteadmin/pkg/runtime/interfaces" + "github.com/lyft/flytestdlib/logger" + + "github.com/NYTimes/gizmo/pubsub" + gizmoConfig "github.com/NYTimes/gizmo/pubsub/aws" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ses" + + "github.com/lyft/flyteadmin/pkg/common" + "github.com/lyft/flytestdlib/promutils" +) + +const maxRetries = 3 + +var enable64decoding = false + +type PublisherConfig struct { + TopicName string +} + +type ProcessorConfig struct { + QueueName string + AccountID string +} + +type EmailerConfig struct { + SenderEmail string + BaseURL string +} + +func GetEmailer(config runtimeInterfaces.NotificationsConfig, scope promutils.Scope) interfaces.Emailer { + switch config.Type { + case common.AWS: + awsConfig := aws.NewConfig().WithRegion(config.Region).WithMaxRetries(maxRetries) + awsSession, err := session.NewSession(awsConfig) + if err != nil { + panic(err) + } + sesClient := ses.New(awsSession) + return implementations.NewAwsEmailer( + config, + scope, + sesClient, + ) + case common.Local: + fallthrough + default: + logger.Infof(context.Background(), "Using default noop emailer implementation for config type [%s]", config.Type) + return implementations.NewNoopEmail() + } +} + +func NewNotificationsProcessor(config runtimeInterfaces.NotificationsConfig, scope promutils.Scope) interfaces.Processor { + var sub pubsub.Subscriber + var emailer 
interfaces.Emailer + switch config.Type { + case common.AWS: + sqsConfig := gizmoConfig.SQSConfig{ + QueueName: config.NotificationsProcessorConfig.QueueName, + QueueOwnerAccountID: config.NotificationsProcessorConfig.AccountID, + // The AWS configuration type uses SNS to SQS for notifications. + // Gizmo by default will decode the SQS message using Base64 decoding. + // However, the message body of SQS is the SNS message format which isn't Base64 encoded. + ConsumeBase64: &enable64decoding, + } + sqsConfig.Region = config.Region + process, err := gizmoConfig.NewSubscriber(sqsConfig) + if err != nil { + panic(err) + } + sub = process + emailer = GetEmailer(config, scope) + case common.Local: + fallthrough + default: + logger.Infof(context.Background(), + "Using default noop notifications processor implementation for config type [%s]", config.Type) + return implementations.NewNoopProcess() + } + return implementations.NewProcessor(sub, emailer, scope) +} + +func NewNotificationsPublisher(config runtimeInterfaces.NotificationsConfig, scope promutils.Scope) interfaces.Publisher { + switch config.Type { + case common.AWS: + snsConfig := gizmoConfig.SNSConfig{ + Topic: config.NotificationsPublisherConfig.TopicName, + } + snsConfig.Region = config.Region + publisher, err := gizmoConfig.NewPublisher(snsConfig) + // Any errors initiating Publisher with Amazon configurations results in a failed start up. 
+ if err != nil { + panic(err) + } + return implementations.NewPublisher(publisher, scope) + case common.Local: + fallthrough + default: + logger.Infof(context.Background(), + "Using default noop notifications publisher implementation for config type [%s]", config.Type) + return implementations.NewNoopPublish() + } +} diff --git a/pkg/async/notifications/implementations/aws_emailer.go b/pkg/async/notifications/implementations/aws_emailer.go new file mode 100644 index 000000000..516a8aebd --- /dev/null +++ b/pkg/async/notifications/implementations/aws_emailer.go @@ -0,0 +1,86 @@ +package implementations + +import ( + "context" + + "github.com/aws/aws-sdk-go/service/ses" + "github.com/aws/aws-sdk-go/service/ses/sesiface" + "github.com/lyft/flyteadmin/pkg/async/notifications/interfaces" + "github.com/lyft/flyteadmin/pkg/errors" + runtimeInterfaces "github.com/lyft/flyteadmin/pkg/runtime/interfaces" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin" + "github.com/lyft/flytestdlib/logger" + "github.com/lyft/flytestdlib/promutils" + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/codes" +) + +type emailMetrics struct { + Scope promutils.Scope + SendSuccess prometheus.Counter + SendError prometheus.Counter + SendTotal prometheus.Counter +} + +func newEmailMetrics(scope promutils.Scope) emailMetrics { + return emailMetrics{ + Scope: scope, + SendSuccess: scope.MustNewCounter("send_success", "Number of successful emails sent via Emailer."), + SendError: scope.MustNewCounter("send_error", "Number of errors when sending email via Emailer"), + SendTotal: scope.MustNewCounter("send_total", "Total number of emails attempted to be sent"), + } +} + +type AwsEmailer struct { + config runtimeInterfaces.NotificationsConfig + systemMetrics emailMetrics + awsEmail sesiface.SESAPI +} + +func (e *AwsEmailer) SendEmail(ctx context.Context, email admin.EmailMessage) error { + var toAddress []*string + for i := range email.RecipientsEmail { + toAddress 
= append(toAddress, &email.RecipientsEmail[i]) + } + + emailInput := ses.SendEmailInput{ + Destination: &ses.Destination{ + ToAddresses: toAddress, + }, + // Currently use the senderEmail specified as part of the Emailer instead of the body. + // Once a more generic way of setting the emailNotification is defined, remove this + // workaround and defer back to email.SenderEmail + Source: &email.SenderEmail, + Message: &ses.Message{ + Body: &ses.Body{ + Html: &ses.Content{ + Data: &email.Body, + }, + }, + Subject: &ses.Content{ + Data: &email.SubjectLine, + }, + }, + } + + _, err := e.awsEmail.SendEmail(&emailInput) + e.systemMetrics.SendTotal.Inc() + + if err != nil { + // TODO: If we see a certain set of AWS errors consistently, we can break the errors down based on type. + logger.Errorf(ctx, "error in sending email [%s] via ses mailer with err: %s", email.String(), err) + e.systemMetrics.SendError.Inc() + return errors.NewFlyteAdminErrorf(codes.Internal, "errors were seen while sending emails") + } + + e.systemMetrics.SendSuccess.Inc() + return nil +} + +func NewAwsEmailer(config runtimeInterfaces.NotificationsConfig, scope promutils.Scope, awsEmail sesiface.SESAPI) interfaces.Emailer { + return &AwsEmailer{ + config: config, + systemMetrics: newEmailMetrics(scope.NewSubScope("aws_ses")), + awsEmail: awsEmail, + } +} diff --git a/pkg/async/notifications/implementations/aws_emailer_test.go b/pkg/async/notifications/implementations/aws_emailer_test.go new file mode 100644 index 000000000..c72e35603 --- /dev/null +++ b/pkg/async/notifications/implementations/aws_emailer_test.go @@ -0,0 +1,121 @@ +package implementations + +import ( + "testing" + + "context" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/aws/aws-sdk-go/service/ses/sesiface" + "github.com/lyft/flyteadmin/pkg/async/notifications/mocks" + runtimeInterfaces "github.com/lyft/flyteadmin/pkg/runtime/interfaces" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin" + 
"github.com/lyft/flytestdlib/promutils"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+)
+
+func getNotificationsConfig() runtimeInterfaces.NotificationsConfig {
+	return runtimeInterfaces.NotificationsConfig{
+		NotificationsEmailerConfig: runtimeInterfaces.NotificationsEmailerConfig{
+			Body: "Execution \"{{ name }}\" has succeeded in \"{{ domain }}\". View details at " +
+				"" +
+				"https://example.com/executions/{{ project }}/{{ domain }}/{{ name }}.",
+			Sender:  "no-reply@example.com",
+			Subject: "Notice: Execution \"{{ name }}\" has succeeded in \"{{ domain }}\".",
+		},
+	}
+}
+
+func TestAwsEmailer_SendEmail(t *testing.T) {
+	mockAwsEmail := mocks.SESClient{}
+	var awsSES sesiface.SESAPI = &mockAwsEmail
+	expectedSenderEmail := "no-reply@example.com"
+	emailNotification := admin.EmailMessage{
+		SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".",
+		SenderEmail: "no-reply@example.com",
+		RecipientsEmail: []string{
+			"my@example.com",
+			"john@example.com",
+		},
+		Body: "Execution \"name\" has succeeded in \"domain\". View details at " +
+			"" +
+			"https://example.com/executions/T/B/D.",
+	}
+
+	sendEmailValidationFunc := func(input *ses.SendEmailInput) (*ses.SendEmailOutput, error) {
+		assert.Equal(t, *input.Source, expectedSenderEmail)
+		assert.Equal(t, *input.Message.Body.Html.Data, emailNotification.Body)
+		assert.Equal(t, *input.Message.Subject.Data, emailNotification.SubjectLine)
+		for _, toEmail := range input.Destination.ToAddresses {
+			var foundEmail = false
+			for _, verifyToEmail := range emailNotification.RecipientsEmail {
+				if *toEmail == verifyToEmail {
+					foundEmail = true
+				}
+			}
+			// Fixed message typo: "apart of" -> "a part of".
+			assert.Truef(t, foundEmail, "To Email address [%s] wasn't a part of original inputs.", *toEmail)
+		}
+		assert.Equal(t, len(input.Destination.ToAddresses), len(emailNotification.RecipientsEmail))
+		return &ses.SendEmailOutput{}, nil
+	}
+	mockAwsEmail.SetSendEmailFunc(sendEmailValidationFunc)
+	testEmail := NewAwsEmailer(getNotificationsConfig(), promutils.NewTestScope(), awsSES)
+
+	assert.Nil(t, testEmail.SendEmail(context.Background(), emailNotification))
+}
+
+func TestAwsEmailer_SendEmailError(t *testing.T) {
+	mockAwsEmail := mocks.SESClient{}
+	var awsSES sesiface.SESAPI
+	emailError := errors.New("error sending email")
+	sendEmailErrorFunc := func(input *ses.SendEmailInput) (*ses.SendEmailOutput, error) {
+		return nil, emailError
+	}
+	mockAwsEmail.SetSendEmailFunc(sendEmailErrorFunc)
+	awsSES = &mockAwsEmail
+
+	testEmail := NewAwsEmailer(getNotificationsConfig(), promutils.NewTestScope(), awsSES)
+
+	emailNotification := admin.EmailMessage{
+		SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".",
+		SenderEmail: "no-reply@example.com",
+		RecipientsEmail: []string{
+			"my@example.com",
+			"john@example.com",
+		},
+		Body: "Execution \"name\" has succeeded in \"domain\". View details at " +
+			"" +
+			"https://example.com/executions/T/B/D.",
+	}
+	assert.EqualError(t, testEmail.SendEmail(context.Background(), emailNotification), "errors were seen while sending emails")
+}
+
+func TestAwsEmailer_SendEmailEmailOutput(t *testing.T) {
+	mockAwsEmail := mocks.SESClient{}
+	var awsSES sesiface.SESAPI
+	emailOutput := ses.SendEmailOutput{
+		MessageId: aws.String("1234"),
+	}
+	sendEmailErrorFunc := func(input *ses.SendEmailInput) (*ses.SendEmailOutput, error) {
+		return &emailOutput, nil
+	}
+	mockAwsEmail.SetSendEmailFunc(sendEmailErrorFunc)
+	awsSES = &mockAwsEmail
+
+	testEmail := NewAwsEmailer(getNotificationsConfig(), promutils.NewTestScope(), awsSES)
+
+	emailNotification := admin.EmailMessage{
+		SubjectLine: "Notice: Execution \"name\" has succeeded in \"domain\".",
+		SenderEmail: "no-reply@example.com",
+		RecipientsEmail: []string{
+			"my@example.com",
+			"john@example.com",
+		},
+		Body: "Execution \"name\" has succeeded in \"domain\". View details at " +
+			"" +
+			"https://example.com/executions/T/B/D.",
+	}
+	assert.Nil(t, testEmail.SendEmail(context.Background(), emailNotification))
+}
diff --git a/pkg/async/notifications/implementations/noop_notifications.go b/pkg/async/notifications/implementations/noop_notifications.go
new file mode 100644
index 000000000..f9a0e1595
--- /dev/null
+++ b/pkg/async/notifications/implementations/noop_notifications.go
@@ -0,0 +1,54 @@
+package implementations
+
+import (
+	"context"
+
+	"github.com/lyft/flyteadmin/pkg/async/notifications/interfaces"
+
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+	"github.com/lyft/flytestdlib/logger"
+
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// Email to use when there is no email configuration.
+type NoopEmail struct{}
+
+func (n *NoopEmail) SendEmail(ctx context.Context, email admin.EmailMessage) error {
+	logger.Debugf(ctx, "received noop SendEmail request with subject [%s] and recipient [%s]",
+		email.SubjectLine, strings.Join(email.RecipientsEmail, ","))
+	return nil
+}
+
+func NewNoopEmail() interfaces.Emailer {
+	return &NoopEmail{}
+}
+
+type NoopPublish struct{}
+
+func (n *NoopPublish) Publish(ctx context.Context, notificationType string, msg proto.Message) error {
+	logger.Debugf(ctx, "call to noop publish with notification type [%s] and proto message [%s]", notificationType, msg.String())
+	return nil
+}
+
+func NewNoopPublish() interfaces.Publisher {
+	return &NoopPublish{}
+}
+
+type NoopProcess struct{}
+
+func (n *NoopProcess) StartProcessing() error {
+	logger.Debug(context.Background(), "call to noop start processing.")
+	return nil
+}
+
+func (n *NoopProcess) StopProcessing() error {
+	logger.Debug(context.Background(), "call to noop stop processing.")
+	return nil
+}
+
+func NewNoopProcess() interfaces.Processor {
+	return &NoopProcess{}
+}
diff --git a/pkg/async/notifications/implementations/processor.go b/pkg/async/notifications/implementations/processor.go
new file mode 100644
index 000000000..a3d5ff9c8
--- /dev/null
+++ b/pkg/async/notifications/implementations/processor.go
@@ -0,0 +1,160 @@
+package implementations
+
+import (
+	"context"
+
+	"github.com/lyft/flyteadmin/pkg/async/notifications/interfaces"
+
+	"encoding/base64"
+	"encoding/json"
+
+	"github.com/NYTimes/gizmo/pubsub"
+	"github.com/golang/protobuf/proto"
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+	"github.com/lyft/flytestdlib/logger"
+	"github.com/lyft/flytestdlib/promutils"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type processorSystemMetrics struct {
+	Scope                 promutils.Scope
+	MessageTotal          prometheus.Counter
+	MessageDoneError      prometheus.Counter
+	MessageDecodingError  prometheus.Counter
+	MessageDataError      prometheus.Counter
+	MessageProcessorError prometheus.Counter
+	MessageSuccess        prometheus.Counter
+	ChannelClosedError    prometheus.Counter
+	StopError             prometheus.Counter
+}
+
+// TODO: Add a counter that encompasses the publisher stats grouped by project and domain.
+type Processor struct {
+	sub           pubsub.Subscriber
+	email         interfaces.Emailer
+	systemMetrics processorSystemMetrics
+}
+
+// Currently only email is the supported notification because slack and pagerduty both use
+// email client to trigger those notifications.
+// When Pagerduty and other notifications are supported, a publisher per type should be created.
+func (p *Processor) StartProcessing() error {
+	var emailMessage admin.EmailMessage
+	var err error
+	for msg := range p.sub.Start() {
+
+		p.systemMetrics.MessageTotal.Inc()
+		// Currently this is safe because Gizmo takes a string and casts it to a byte array.
+		var stringMsg = string(msg.Message())
+		// Amazon doesn't provide a struct that can be used to unmarshall into. A generic JSON struct is used in its place.
+		var snsJSONFormat map[string]interface{}
+
+		// At Lyft, SNS populates SQS. This results in the message body of SQS having the SNS message format.
+		// The message format is documented here: https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html
+		// The notification published is stored in the message field after unmarshalling the SQS message.
+		if err := json.Unmarshal(msg.Message(), &snsJSONFormat); err != nil {
+			p.systemMetrics.MessageDecodingError.Inc()
+			logger.Errorf(context.Background(), "failed to unmarshall JSON message [%s] from processor with err: %v", stringMsg, err)
+			p.markMessageDone(msg)
+			continue
+		}
+
+		var value interface{}
+		var ok bool
+		var valueString string
+
+		if value, ok = snsJSONFormat["Message"]; !ok {
+			logger.Errorf(context.Background(), "failed to retrieve message from unmarshalled JSON object [%s]", stringMsg)
+			p.systemMetrics.MessageDataError.Inc()
+			p.markMessageDone(msg)
+			continue
+		}
+
+		if valueString, ok = value.(string); !ok {
+			p.systemMetrics.MessageDataError.Inc()
+			logger.Errorf(context.Background(), "failed to retrieve notification message (in string format) from unmarshalled JSON object for message [%s]", stringMsg)
+			p.markMessageDone(msg)
+			continue
+		}
+
+		// The Publish method for SNS Encodes the notification using Base64 then stringifies it before
+		// setting that as the message body for SNS. Do the inverse to retrieve the notification.
+		notificationBytes, err := base64.StdEncoding.DecodeString(valueString)
+		if err != nil {
+			logger.Errorf(context.Background(), "failed to Base64 decode from message string [%s] from message [%s] with err: %v", valueString, stringMsg, err)
+			p.systemMetrics.MessageDecodingError.Inc()
+			p.markMessageDone(msg)
+			continue
+		}
+
+		if err = proto.Unmarshal(notificationBytes, &emailMessage); err != nil {
+			logger.Errorf(context.Background(), "failed to unmarshal to notification object from decoded string[%s] from message [%s] with err: %v", valueString, stringMsg, err)
+			p.systemMetrics.MessageDecodingError.Inc()
+			p.markMessageDone(msg)
+			continue
+		}
+
+		if err = p.email.SendEmail(context.Background(), emailMessage); err != nil {
+			p.systemMetrics.MessageProcessorError.Inc()
+			logger.Errorf(context.Background(), "Error sending an email message for message [%s] with err: %v", emailMessage.String(), err)
+		} else {
+			p.systemMetrics.MessageSuccess.Inc()
+		}
+
+		p.markMessageDone(msg)
+
+	}
+
+	// According to https://github.com/NYTimes/gizmo/blob/f2b3deec03175b11cdfb6642245a49722751357f/pubsub/pubsub.go#L36-L39,
+	// the channel backing the subscriber will just close if there is an error. The call to Err() is needed to identify
+	// there was an error in the channel or there are no more messages left (resulting in no errors when calling Err()).
+	if err = p.sub.Err(); err != nil {
+		p.systemMetrics.ChannelClosedError.Inc()
+		logger.Warningf(context.Background(), "The stream for the subscriber channel closed with err: %v", err)
+	}
+
+	// If there are no errors, nil will be returned.
+	return err
+}
+
+func (p *Processor) markMessageDone(message pubsub.SubscriberMessage) {
+	if err := message.Done(); err != nil {
+		p.systemMetrics.MessageDoneError.Inc()
+		logger.Errorf(context.Background(), "failed to mark message as Done() in processor with err: %v", err)
+	}
+}
+
+func (p *Processor) StopProcessing() error {
+	// Note: If the underlying channel is already closed, then Stop() will return an error.
+	err := p.sub.Stop()
+	if err != nil {
+		p.systemMetrics.StopError.Inc()
+		logger.Errorf(context.Background(), "Failed to stop the subscriber channel gracefully with err: %v", err)
+	}
+	return err
+}
+
+func newProcessorSystemMetrics(scope promutils.Scope) processorSystemMetrics {
+	return processorSystemMetrics{
+		Scope:                scope,
+		MessageTotal:         scope.MustNewCounter("message_total", "overall count of messages processed"),
+		MessageDecodingError: scope.MustNewCounter("message_decoding_error", "count of messages with decoding errors"),
+		MessageDataError:     scope.MustNewCounter("message_data_error", "count of message data processing errors experience when preparing the message to be notified."),
+		MessageDoneError: scope.MustNewCounter("message_done_error",
+			"count of message errors when marking it as done with underlying processor"),
+		MessageProcessorError: scope.MustNewCounter("message_processing_error",
+			"count of errors when interacting with notification processor"),
+		MessageSuccess: scope.MustNewCounter("message_ok",
+			"count of messages successfully processed by underlying notification mechanism"),
+		ChannelClosedError: scope.MustNewCounter("channel_closed_error", "count of channel closing errors"),
+		StopError:          scope.MustNewCounter("stop_error", "count of errors in Stop() method"),
+	}
+}
+
+func NewProcessor(sub pubsub.Subscriber, emailer interfaces.Emailer, scope promutils.Scope) interfaces.Processor {
+	return &Processor{
+		sub:           sub,
+		email:         emailer,
+		systemMetrics: newProcessorSystemMetrics(scope.NewSubScope("processor")),
+	}
+}
diff
--git a/pkg/async/notifications/implementations/processor_test.go b/pkg/async/notifications/implementations/processor_test.go
new file mode 100644
index 000000000..c714b5480
--- /dev/null
+++ b/pkg/async/notifications/implementations/processor_test.go
@@ -0,0 +1,140 @@
+package implementations
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"encoding/base64"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+
+	"github.com/lyft/flyteadmin/pkg/async/notifications/mocks"
+	"github.com/stretchr/testify/assert"
+)
+
+var mockEmailer mocks.MockEmailer
+
+// This method should be invoked before every test to Subscriber.
+func initializeProcessor() {
+	testSubscriber.GivenStopError = nil
+	testSubscriber.GivenErrError = nil
+	testSubscriber.FoundError = nil
+	testSubscriber.ProtoMessages = nil
+	testSubscriber.JSONMessages = nil
+}
+
+func TestProcessor_StartProcessing(t *testing.T) {
+	initializeProcessor()
+
+	// Because the message stored in Amazon SQS is a JSON of the SNS output, store the test output in the JSON Messages.
+	testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testSubscriberMessage)
+
+	sendEmailValidationFunc := func(ctx context.Context, email admin.EmailMessage) error {
+		assert.Equal(t, email.Body, testEmail.Body)
+		assert.Equal(t, email.RecipientsEmail, testEmail.RecipientsEmail)
+		assert.Equal(t, email.SubjectLine, testEmail.SubjectLine)
+		assert.Equal(t, email.SenderEmail, testEmail.SenderEmail)
+		return nil
+	}
+	mockEmailer.SetSendEmailFunc(sendEmailValidationFunc)
+	// TODO Add test for metric inc for number of messages processed.
+	// Assert 1 message processed and 1 total.
+	assert.Nil(t, testProcessor.StartProcessing())
+}
+
+func TestProcessor_StartProcessingNoMessages(t *testing.T) {
+	initializeProcessor()
+	// Expect no errors are returned.
+	assert.Nil(t, testProcessor.StartProcessing())
+	// TODO add test for metric inc() for number of messages processed.
+	// Assert 0 messages processed and 0 total.
+}
+
+func TestProcessor_StartProcessingNoNotificationMessage(t *testing.T) {
+	var testMessage = map[string]interface{}{
+		"Type":      "Not a real notification",
+		"MessageId": "1234",
+	}
+	initializeProcessor()
+	testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testMessage)
+	assert.Nil(t, testProcessor.StartProcessing())
+	// TODO add test for metric inc() for number of messages processed.
+	// Assert 1 messages error and 1 total.
+}
+
+func TestProcessor_StartProcessingMessageWrongDataType(t *testing.T) {
+	var testMessage = map[string]interface{}{
+		"Type":      "Not a real notification",
+		"MessageId": "1234",
+		"Message":   12,
+	}
+	initializeProcessor()
+	testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testMessage)
+	assert.Nil(t, testProcessor.StartProcessing())
+	// TODO add test for metric inc() for number of messages processed.
+	// Assert 1 messages error and 1 total.
+}
+
+func TestProcessor_StartProcessingBase64DecodeError(t *testing.T) {
+	var testMessage = map[string]interface{}{
+		"Type":      "Not a real notification",
+		"MessageId": "1234",
+		"Message":   "NotBase64encoded",
+	}
+	initializeProcessor()
+	testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testMessage)
+	assert.Nil(t, testProcessor.StartProcessing())
+	// TODO add test for metric inc() for number of messages processed.
+	// Assert 1 messages error and 1 total.
+}
+
+func TestProcessor_StartProcessingProtoMarshallError(t *testing.T) {
+	var badByte = []byte("atreyu")
+	var testMessage = map[string]interface{}{
+		"Type":      "Not a real notification",
+		"MessageId": "1234",
+		"Message":   aws.String(base64.StdEncoding.EncodeToString(badByte)),
+	}
+	initializeProcessor()
+	testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testMessage)
+	assert.Nil(t, testProcessor.StartProcessing())
+	// TODO add test for metric inc() for number of messages processed.
+	// Assert 1 messages error and 1 total.
+}
+
+func TestProcessor_StartProcessingError(t *testing.T) {
+	initializeProcessor()
+	var ret = errors.New("err() returned an error")
+	// The error set by GivenErrError is returned by Err().
+	// Err() is checked before Run() returning.
+	testSubscriber.GivenErrError = ret
+	assert.Equal(t, ret, testProcessor.StartProcessing())
+}
+
+func TestProcessor_StartProcessingEmailError(t *testing.T) {
+	initializeProcessor()
+	emailError := errors.New("error sending email")
+	sendEmailErrorFunc := func(ctx context.Context, email admin.EmailMessage) error {
+		return emailError
+	}
+	mockEmailer.SetSendEmailFunc(sendEmailErrorFunc)
+	testSubscriber.JSONMessages = append(testSubscriber.JSONMessages, testSubscriberMessage)
+
+	// Even if there is an error in sending an email StartProcessing will return no errors.
+	// TODO: Once stats have been added check for an email error stat.
+	assert.Nil(t, testProcessor.StartProcessing())
+}
+
+func TestProcessor_StopProcessing(t *testing.T) {
+	initializeProcessor()
+	assert.Nil(t, testProcessor.StopProcessing())
+}
+
+func TestProcessor_StopProcessingError(t *testing.T) {
+	initializeProcessor()
+	var stopError = errors.New("stop() returns an error")
+	testSubscriber.GivenStopError = stopError
+	assert.Equal(t, stopError, testProcessor.StopProcessing())
+}
diff --git a/pkg/async/notifications/implementations/publisher.go b/pkg/async/notifications/implementations/publisher.go
new file mode 100644
index 000000000..63d00883c
--- /dev/null
+++ b/pkg/async/notifications/implementations/publisher.go
@@ -0,0 +1,52 @@
+package implementations
+
+import (
+	"context"
+
+	"github.com/lyft/flyteadmin/pkg/async/notifications/interfaces"
+
+	"github.com/NYTimes/gizmo/pubsub"
+	"github.com/golang/protobuf/proto"
+	"github.com/lyft/flytestdlib/logger"
+	"github.com/lyft/flytestdlib/promutils"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type publisherSystemMetrics struct {
+	Scope        promutils.Scope
+	PublishTotal prometheus.Counter
+	PublishError prometheus.Counter
+}
+
+// TODO: Add a counter that encompasses the publisher stats grouped by project and domain.
+type Publisher struct {
+	pub           pubsub.Publisher
+	systemMetrics publisherSystemMetrics
+}
+
+// The key is the notification type as defined as an enum.
+func (p *Publisher) Publish(ctx context.Context, notificationType string, msg proto.Message) error {
+	p.systemMetrics.PublishTotal.Inc()
+	logger.Debugf(ctx, "Publishing the following message [%s]", msg.String())
+	err := p.pub.Publish(ctx, notificationType, msg)
+	if err != nil {
+		p.systemMetrics.PublishError.Inc()
+		logger.Errorf(ctx, "Failed to publish a message with key [%s] and message [%s] and error: %v", notificationType, msg.String(), err)
+	}
+	return err
+}
+
+func newPublisherSystemMetrics(scope promutils.Scope) publisherSystemMetrics {
+	return publisherSystemMetrics{
+		Scope:        scope,
+		PublishTotal: scope.MustNewCounter("publish_total", "overall count of publish messages"),
+		PublishError: scope.MustNewCounter("publish_errors", "count of publish errors"),
+	}
+}
+
+func NewPublisher(pub pubsub.Publisher, scope promutils.Scope) interfaces.Publisher {
+	return &Publisher{
+		pub:           pub,
+		systemMetrics: newPublisherSystemMetrics(scope.NewSubScope("publisher")),
+	}
+}
diff --git a/pkg/async/notifications/implementations/publisher_test.go b/pkg/async/notifications/implementations/publisher_test.go
new file mode 100644
index 000000000..893a2e2c1
--- /dev/null
+++ b/pkg/async/notifications/implementations/publisher_test.go
@@ -0,0 +1,76 @@
+package implementations
+
+import (
+	"context"
+	"errors"
+	"testing"
+
+	"github.com/lyft/flyteadmin/pkg/async/notifications/mocks"
+
+	"encoding/base64"
+
+	"github.com/NYTimes/gizmo/pubsub"
+	"github.com/NYTimes/gizmo/pubsub/pubsubtest"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/golang/protobuf/proto"
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+	"github.com/lyft/flytestdlib/promutils"
+	"github.com/stretchr/testify/assert"
+)
+
+var testPublisher pubsubtest.TestPublisher
+var mockPublisher pubsub.Publisher = &testPublisher
+var currentPublisher = NewPublisher(mockPublisher, promutils.NewTestScope())
+var testEmail = admin.EmailMessage{
+	RecipientsEmail: []string{
+		"a@example.com",
+		"b@example.com",
+	},
+	SenderEmail: "no-reply@example.com",
+	SubjectLine: "Test email",
+	Body:        "This is a sample email.",
+}
+
+var msg, _ = proto.Marshal(&testEmail)
+
+var testSubscriberMessage = map[string]interface{}{
+	"Type":             "Notification",
+	"MessageId":        "1-a-3-c",
+	"TopicArn":         "arn:aws:sns:my-region:123:flyte-test-notifications",
+	"Subject":          "flyteidl.admin.EmailNotification",
+	"Message":          aws.String(base64.StdEncoding.EncodeToString(msg)),
+	"Timestamp":        "2019-01-04T22:59:32.849Z",
+	"SignatureVersion": "1",
+	"Signature":        "some&ignature==",
+	"SigningCertURL":   "https://sns.my-region.amazonaws.com/afdaf",
+	"UnsubscribeURL":   "https://sns.my-region.amazonaws.com/sns:my-region:123:flyte-test-notifications:1-2-3-4-5",
+}
+var testSubscriber pubsubtest.TestSubscriber
+var mockSub pubsub.Subscriber = &testSubscriber
+var mockEmail mocks.MockEmailer
+var testProcessor = NewProcessor(mockSub, &mockEmail, promutils.NewTestScope())
+
+// This method should be invoked before every test around Publisher.
+func initializePublisher() {
+	testPublisher.Published = nil
+	testPublisher.GivenError = nil
+	testPublisher.FoundError = nil
+}
+
+func TestPublisher_PublishSuccess(t *testing.T) {
+	initializePublisher()
+	assert.Nil(t, currentPublisher.Publish(context.Background(), proto.MessageName(&testEmail), &testEmail))
+	assert.Equal(t, 1, len(testPublisher.Published))
+	assert.Equal(t, proto.MessageName(&testEmail), testPublisher.Published[0].Key)
+	marshalledData, err := proto.Marshal(&testEmail)
+	assert.Nil(t, err)
+	assert.Equal(t, marshalledData, testPublisher.Published[0].Body)
+
+}
+
+func TestPublisher_PublishError(t *testing.T) {
+	initializePublisher()
+	var publishError = errors.New("publish() returns an error")
+	testPublisher.GivenError = publishError
+	assert.Equal(t, publishError, currentPublisher.Publish(context.Background(), "test", &testEmail))
+}
diff --git a/pkg/async/notifications/interfaces/emailer.go b/pkg/async/notifications/interfaces/emailer.go
new file mode 100644
index 000000000..8970ff41f
--- /dev/null
+++ b/pkg/async/notifications/interfaces/emailer.go
@@ -0,0 +1,13 @@
+package interfaces
+
+import (
+	"context"
+
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+)
+
+// The implementation of Emailer needs to be passed to the implementation of Processor
+// in order for emails to be sent.
+type Emailer interface {
+	SendEmail(ctx context.Context, email admin.EmailMessage) error
+}
diff --git a/pkg/async/notifications/interfaces/processor.go b/pkg/async/notifications/interfaces/processor.go
new file mode 100644
index 000000000..0a8534d57
--- /dev/null
+++ b/pkg/async/notifications/interfaces/processor.go
@@ -0,0 +1,17 @@
+package interfaces
+
+// Exposes the common methods required for a subscriber.
+// There is one ProcessNotification per type.
+type Processor interface {
+
+	// Starts processing messages from the underlying subscriber.
+	// If the channel closes gracefully, no error will be returned.
+	// If the underlying channel experiences errors,
+	// an error is returned and the channel is closed.
+	StartProcessing() error
+
+	// This should be invoked when the application is shutting down.
+	// If StartProcessing() returned an error, StopProcessing() will return an error because
+	// the channel was already closed.
+	StopProcessing() error
+}
diff --git a/pkg/async/notifications/interfaces/publisher.go b/pkg/async/notifications/interfaces/publisher.go
new file mode 100644
index 000000000..94e45180f
--- /dev/null
+++ b/pkg/async/notifications/interfaces/publisher.go
@@ -0,0 +1,27 @@
+package interfaces
+
+import (
+	"context"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// Note on Notifications
+
+// Notifications are handled in two steps.
+// 1. Publishing a notification
+// 2. Processing a notification
+
+// Publishing a notification enqueues a notification message to be processed. Currently there is only
+// one publisher for all notification types with the type differing based on the key.
+// The notification hasn't been delivered at this stage.
+// Processing a notification takes a notification message from the publisher and will pass
+// the notification using the desired delivery method (ex: email). There is one processor per
+// notification type.
+
+// Publish a notification will differ between different types of notifications using the key
+// The contract requires one subscription per type i.e. one for email one for slack, etc...
+type Publisher interface {
+	// The notification type is inferred from the Notification object in the Execution Spec.
+	Publish(ctx context.Context, notificationType string, msg proto.Message) error
+}
diff --git a/pkg/async/notifications/mocks/emailer.go b/pkg/async/notifications/mocks/emailer.go
new file mode 100644
index 000000000..3705d4935
--- /dev/null
+++ b/pkg/async/notifications/mocks/emailer.go
@@ -0,0 +1,24 @@
+package mocks
+
+import (
+	"github.com/aws/aws-sdk-go/service/ses"
+	"github.com/aws/aws-sdk-go/service/ses/sesiface"
+)
+
+type AwsSendEmailFunc func(input *ses.SendEmailInput) (*ses.SendEmailOutput, error)
+
+type SESClient struct {
+	sesiface.SESAPI
+	sendEmail AwsSendEmailFunc
+}
+
+func (m *SESClient) SetSendEmailFunc(emailFunc AwsSendEmailFunc) {
+	m.sendEmail = emailFunc
+}
+
+func (m *SESClient) SendEmail(input *ses.SendEmailInput) (*ses.SendEmailOutput, error) {
+	if m.sendEmail != nil {
+		return m.sendEmail(input)
+	}
+	return &ses.SendEmailOutput{}, nil
+}
diff --git a/pkg/async/notifications/mocks/processor.go b/pkg/async/notifications/mocks/processor.go
new file mode 100644
index 000000000..9ef18a8da
--- /dev/null
+++ b/pkg/async/notifications/mocks/processor.go
@@ -0,0 +1,47 @@
+package mocks
+
+import (
+	"context"
+
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+)
+
+type RunFunc func() error
+
+type StopFunc func() error
+
+type MockSubscriber struct {
+	runFunc  RunFunc
+	stopFunc StopFunc
+}
+
+func (m *MockSubscriber) Run() error {
+	if m.runFunc != nil {
+		return m.runFunc()
+	}
+	return nil
+}
+
+func (m *MockSubscriber) Stop() error {
+	if m.stopFunc != nil {
+		return m.stopFunc()
+	}
+	return nil
+}
+
+type SendEmailFunc func(ctx context.Context, email admin.EmailMessage) error
+
+type MockEmailer struct {
+	sendEmailFunc SendEmailFunc
+}
+
+func (m *MockEmailer) SetSendEmailFunc(sendEmail SendEmailFunc) {
+	m.sendEmailFunc = sendEmail
+}
+
+func (m *MockEmailer) SendEmail(ctx context.Context, email admin.EmailMessage) error {
+	if m.sendEmailFunc != nil {
+		return m.sendEmailFunc(ctx, email)
+	}
+	return nil
+}
diff --git
a/pkg/async/notifications/mocks/publisher.go b/pkg/async/notifications/mocks/publisher.go
new file mode 100644
index 000000000..ccfa041eb
--- /dev/null
+++ b/pkg/async/notifications/mocks/publisher.go
@@ -0,0 +1,24 @@
+package mocks
+
+import (
+	"context"
+
+	"github.com/golang/protobuf/proto"
+)
+
+type PublishFunc func(ctx context.Context, key string, msg proto.Message) error
+
+type MockPublisher struct {
+	publishFunc PublishFunc
+}
+
+func (m *MockPublisher) SetPublishCallback(publishFunction PublishFunc) {
+	m.publishFunc = publishFunction
+}
+
+func (m *MockPublisher) Publish(ctx context.Context, notificationType string, msg proto.Message) error {
+	if m.publishFunc != nil {
+		return m.publishFunc(ctx, notificationType, msg)
+	}
+	return nil
+}
diff --git a/pkg/async/schedule/aws/cloud_watch_scheduler.go b/pkg/async/schedule/aws/cloud_watch_scheduler.go
new file mode 100644
index 000000000..297a57b68
--- /dev/null
+++ b/pkg/async/schedule/aws/cloud_watch_scheduler.go
@@ -0,0 +1,263 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/lyft/flyteadmin/pkg/async/schedule/aws/interfaces"
+	scheduleInterfaces "github.com/lyft/flyteadmin/pkg/async/schedule/interfaces"
+
+	"github.com/lyft/flytestdlib/promutils"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
+	"github.com/lyft/flyteadmin/pkg/errors"
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+	"github.com/lyft/flytestdlib/logger"
+	"google.golang.org/grpc/codes"
+)
+
+// To indicate that a schedule rule is enabled.
+var enableState = "ENABLED"
+
+// CloudWatch schedule expressions.
+const (
+	cronExpression = "cron(%s)"
+	rateExpression = "rate(%v %s)"
+)
+
+const timePlaceholder = "time"
+
+var timeValue = "$.time"
+
+const scheduleNameInputsFormat = "%s:%s:%s"
+const scheduleDescriptionFormat = "Schedule for Project:%s Domain:%s Name:%s launch plan"
+const scheduleNameFormat = "flyte_%d"
+
+// Container for initialized metrics objects
+type cloudWatchSchedulerMetrics struct {
+	Scope             promutils.Scope
+	InvalidSchedules  prometheus.Counter
+	AddRuleFailures   prometheus.Counter
+	AddTargetFailures prometheus.Counter
+	SchedulesAdded    prometheus.Counter
+
+	RemoveRuleFailures      prometheus.Counter
+	RemoveRuleDoesntExist   prometheus.Counter
+	RemoveTargetFailures    prometheus.Counter
+	RemoveTargetDoesntExist prometheus.Counter
+	RemovedSchedules        prometheus.Counter
+
+	ActiveSchedules prometheus.Gauge
+}
+
+// An AWS CloudWatch implementation of the EventScheduler.
+type cloudWatchScheduler struct {
+	// The ARN of the IAM role associated with the scheduler.
+	scheduleRoleArn string
+	// The ARN of the SQS target used for registering schedule events.
+	targetSqsArn string
+	// AWS CloudWatchEvents service client.
+	cloudWatchEventClient interfaces.CloudWatchEventClient
+	// For emitting scheduler-related metrics
+	metrics cloudWatchSchedulerMetrics
+}
+
+func getScheduleName(identifier admin.NamedEntityIdentifier) string {
+	hashedIdentifier := hashIdentifier(identifier)
+	return fmt.Sprintf(scheduleNameFormat, hashedIdentifier)
+}
+
+func getScheduleDescription(identifier admin.NamedEntityIdentifier) string {
+	return fmt.Sprintf(scheduleDescriptionFormat,
+		identifier.Project, identifier.Domain, identifier.Name)
+}
+
+func getScheduleExpression(schedule admin.Schedule) (string, error) {
+	if schedule.GetCronExpression() != "" {
+		return fmt.Sprintf(cronExpression, schedule.GetCronExpression()), nil
+	}
+	if schedule.GetRate() != nil {
+		// AWS uses pluralization for units of values not equal to 1.
+		// See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
+		unit := strings.ToLower(schedule.GetRate().Unit.String())
+		if schedule.GetRate().Value != 1 {
+			unit = fmt.Sprintf("%ss", unit)
+		}
+		return fmt.Sprintf(rateExpression, schedule.GetRate().Value, unit), nil
+	}
+	logger.Debugf(context.Background(), "scheduler encountered invalid schedule expression: %s", schedule.String())
+	return "", errors.NewFlyteAdminErrorf(codes.InvalidArgument, "unrecognized schedule expression")
+}
+
+func formatEventScheduleInputs(inputTemplate *string) cloudwatchevents.InputTransformer {
+	inputsPathMap := map[string]*string{
+		timePlaceholder: &timeValue,
+	}
+	return cloudwatchevents.InputTransformer{
+		InputPathsMap: inputsPathMap,
+		InputTemplate: inputTemplate,
+	}
+}
+
+func (s *cloudWatchScheduler) AddSchedule(ctx context.Context, input scheduleInterfaces.AddScheduleInput) error {
+	if input.Payload == nil {
+		logger.Debugf(ctx, "AddSchedule called with empty input payload: %+v", input)
+		return errors.NewFlyteAdminError(codes.InvalidArgument, "payload serialization function cannot be nil")
+	}
+	scheduleExpression, err := getScheduleExpression(input.ScheduleExpression)
+	if err != nil {
+		s.metrics.InvalidSchedules.Inc()
+		return err
+	}
+	scheduleName := getScheduleName(input.Identifier)
+	scheduleDescription := getScheduleDescription(input.Identifier)
+	// First define a rule which gets triggered on a schedule.
+	requestInput := cloudwatchevents.PutRuleInput{
+		ScheduleExpression: &scheduleExpression,
+		Name:               &scheduleName,
+		Description:        &scheduleDescription,
+		RoleArn:            &s.scheduleRoleArn,
+		State:              &enableState,
+	}
+	putRuleOutput, err := s.cloudWatchEventClient.PutRule(&requestInput)
+	if err != nil {
+		logger.Infof(ctx, "Failed to add rule to cloudwatch for schedule [%+v] with name %s and expression %s with err: %v",
+			input.Identifier, scheduleName, scheduleExpression, err)
+		s.metrics.AddRuleFailures.Inc()
+		return errors.NewFlyteAdminErrorf(codes.Internal, "failed to add rule to cloudwatch with err: %v", err)
+	}
+	eventInputTransformer := formatEventScheduleInputs(input.Payload)
+	// Next, add a target which gets invoked when the above rule is triggered.
+	putTargetOutput, err := s.cloudWatchEventClient.PutTargets(&cloudwatchevents.PutTargetsInput{
+		Rule: &scheduleName,
+		Targets: []*cloudwatchevents.Target{
+			{
+				Arn:              &s.targetSqsArn,
+				Id:               &scheduleName,
+				InputTransformer: &eventInputTransformer,
+			},
+		},
+	})
+	if err != nil {
+		logger.Infof(ctx, "Failed to add target for event schedule [%+v] with name %s with err: %v",
+			input.Identifier, scheduleName, err)
+		s.metrics.AddTargetFailures.Inc()
+		return errors.NewFlyteAdminErrorf(codes.Internal, "failed to add target for event schedule with err: %v", err)
+	} else if putTargetOutput.FailedEntryCount != nil && *putTargetOutput.FailedEntryCount > 0 {
+		logger.Infof(ctx, "Failed to add target for event schedule [%+v] with name %s with failed entries: %d",
+			input.Identifier, scheduleName, *putTargetOutput.FailedEntryCount)
+		s.metrics.AddTargetFailures.Inc()
+		return errors.NewFlyteAdminErrorf(codes.Internal,
+			"failed to add target for event schedule with %v errs", *putTargetOutput.FailedEntryCount)
+	}
+	var putRuleOutputName string
+	if putRuleOutput != nil && putRuleOutput.RuleArn != nil {
+		putRuleOutputName = *putRuleOutput.RuleArn
+	}
+	logger.Debugf(ctx, "Added schedule %s [%s] with arn: %s (%s)",
+		scheduleName, scheduleExpression, putRuleOutputName, scheduleDescription)
+	s.metrics.SchedulesAdded.Inc()
+	s.metrics.ActiveSchedules.Inc()
+	return nil
+}
+
+func isResourceNotFoundException(err error) bool {
+	// Single comma-ok assertion instead of a type switch plus a second assertion.
+	awsErr, ok := err.(awserr.Error)
+	if ok {
+		return awsErr.Code() == cloudwatchevents.ErrCodeResourceNotFoundException
+	}
+	return false
+}
+
+func (s *cloudWatchScheduler) RemoveSchedule(ctx context.Context, identifier admin.NamedEntityIdentifier) error {
+	name := getScheduleName(identifier)
+	// All outbound targets for a rule must be deleted before the rule itself can be deleted.
+	output, err := s.cloudWatchEventClient.RemoveTargets(&cloudwatchevents.RemoveTargetsInput{
+		Ids: []*string{
+			&name,
+		},
+		Rule: &name,
+	})
+	if err != nil {
+		if isResourceNotFoundException(err) {
+			s.metrics.RemoveTargetDoesntExist.Inc()
+			logger.Debugf(ctx, "Tried to remove cloudwatch target %s but it was not found", name)
+		} else {
+			s.metrics.RemoveTargetFailures.Inc()
+			logger.Errorf(ctx, "failed to remove cloudwatch target %s with err: %v", name, err)
+			return errors.NewFlyteAdminErrorf(codes.Internal, "failed to remove cloudwatch target %s with err: %v", name, err)
+		}
+	}
+	if output != nil && output.FailedEntryCount != nil && *output.FailedEntryCount > 0 {
+		s.metrics.RemoveTargetFailures.Inc()
+		logger.Errorf(ctx, "failed to remove cloudwatch target %s with %v errs",
+			name, *output.FailedEntryCount)
+		return errors.NewFlyteAdminErrorf(codes.Internal, "failed to remove cloudwatch target %s with %v errs",
+			name, *output.FailedEntryCount)
+	}
+
+	// Output from the call to DeleteRule is an empty struct.
+	_, err = s.cloudWatchEventClient.DeleteRule(&cloudwatchevents.DeleteRuleInput{
+		Name: &name,
+	})
+	if err != nil {
+		if isResourceNotFoundException(err) {
+			s.metrics.RemoveRuleDoesntExist.Inc()
+			logger.Debugf(ctx, "Tried to remove cloudwatch rule %s but it was not found", name)
+		} else {
+			s.metrics.RemoveRuleFailures.Inc()
+			logger.Errorf(ctx, "failed to remove cloudwatch rule %s with err: %v", name, err)
+			return errors.NewFlyteAdminErrorf(codes.Internal,
+				"failed to remove cloudwatch rule %s with err: %v", name, err)
+		}
+	}
+	s.metrics.RemovedSchedules.Inc()
+	s.metrics.ActiveSchedules.Dec()
+	logger.Debugf(ctx, "Removed schedule %s for identifier [%+v]", name, identifier)
+	return nil
+}
+
+// Initializes a new set of metrics specific to the cloudwatch scheduler implementation.
+func newCloudWatchSchedulerMetrics(scope promutils.Scope) cloudWatchSchedulerMetrics {
+	return cloudWatchSchedulerMetrics{
+		Scope:            scope,
+		InvalidSchedules: scope.MustNewCounter("schedules_invalid", "count of invalid schedule expressions submitted"),
+		AddRuleFailures: scope.MustNewCounter("add_rule_failures",
+			"count of attempts to add a cloudwatch rule that have failed"),
+		AddTargetFailures: scope.MustNewCounter("add_target_failures",
+			"count of attempts to add a cloudwatch target that have failed"),
+		SchedulesAdded: scope.MustNewCounter("schedules_added",
+			"count of all schedules successfully added to cloudwatch"),
+		RemoveRuleFailures: scope.MustNewCounter("delete_rule_failures",
+			"count of attempts to remove a cloudwatch rule that have failed"),
+		RemoveRuleDoesntExist: scope.MustNewCounter("delete_rule_no_rule",
+			"count of attempts to remove a cloudwatch rule that doesn't exist"),
+		RemoveTargetFailures: scope.MustNewCounter("delete_target_failures",
+			"count of attempts to remove a cloudwatch target that have failed"),
+		RemoveTargetDoesntExist: scope.MustNewCounter("delete_target_no_target",
+			"count of attempts to remove a cloudwatch target that doesn't exist"),
+		RemovedSchedules: scope.MustNewCounter("schedules_removed",
+			"count of all schedules successfully removed from cloudwatch"),
+		ActiveSchedules: scope.MustNewGauge("active_schedules",
+			"count of all active schedules currently in cloudwatch"),
+	}
+}
+
+func NewCloudWatchScheduler(
+	scheduleRoleArn, targetSqsArn string, session *session.Session, config *aws.Config,
+	scope promutils.Scope) scheduleInterfaces.EventScheduler {
+	cloudwatchEventClient := cloudwatchevents.New(session, config)
+	metrics := newCloudWatchSchedulerMetrics(scope)
+	return &cloudWatchScheduler{
+		scheduleRoleArn:       scheduleRoleArn,
+		targetSqsArn:          targetSqsArn,
+		cloudWatchEventClient: cloudwatchEventClient,
+		metrics:               metrics,
+	}
+}
diff --git a/pkg/async/schedule/aws/cloud_watch_scheduler_test.go b/pkg/async/schedule/aws/cloud_watch_scheduler_test.go
new file mode 100644
index 000000000..91e00bfcc
--- /dev/null
+++ b/pkg/async/schedule/aws/cloud_watch_scheduler_test.go
@@ -0,0 +1,272 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/lyft/flyteadmin/pkg/async/schedule/aws/interfaces"
+	"github.com/lyft/flyteadmin/pkg/async/schedule/aws/mocks"
+	scheduleInterfaces "github.com/lyft/flyteadmin/pkg/async/schedule/interfaces"
+
+	"github.com/lyft/flytestdlib/promutils"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+
+	"testing"
+
+	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
+	flyteAdminErrors "github.com/lyft/flyteadmin/pkg/errors"
+	"github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin"
+	"github.com/stretchr/testify/assert"
+	"google.golang.org/grpc/codes"
+)
+
+const testScheduleName = "flyte_16301494360130577061"
+const testScheduleDescription = "Schedule for Project:project Domain:domain Name:name launch plan"
+
+var expectedError = flyteAdminErrors.NewFlyteAdminError(codes.Internal, "foo")
+
+var testSerializedPayload = fmt.Sprintf("event triggered at '%s'", awsTimestampPlaceholder)
+
+var testSchedulerIdentifier = admin.NamedEntityIdentifier{
+	Project:
"project",
	Domain:  "domain",
	Name:    "name",
}

var scope = promutils.NewScope("test_scheduler")

var testCloudWatchSchedulerMetrics = newCloudWatchSchedulerMetrics(scope)

func TestGetScheduleName(t *testing.T) {
	scheduleName := getScheduleName(testSchedulerIdentifier)
	assert.Equal(t, "flyte_16301494360130577061", scheduleName)
}

func TestGetScheduleDescription(t *testing.T) {
	scheduleDescription := getScheduleDescription(testSchedulerIdentifier)
	assert.Equal(t, "Schedule for Project:project Domain:domain Name:name launch plan", scheduleDescription)
}

func TestGetScheduleExpression(t *testing.T) {
	expression, err := getScheduleExpression(admin.Schedule{
		ScheduleExpression: &admin.Schedule_CronExpression{
			CronExpression: "foo",
		},
	})
	assert.NoError(t, err)
	assert.Equal(t, "cron(foo)", expression)

	expression, err = getScheduleExpression(admin.Schedule{
		ScheduleExpression: &admin.Schedule_Rate{
			Rate: &admin.FixedRate{
				Value: 1,
				Unit:  admin.FixedRateUnit_DAY,
			},
		},
	})
	assert.NoError(t, err)
	assert.Equal(t, "rate(1 day)", expression)

	expression, err = getScheduleExpression(admin.Schedule{
		ScheduleExpression: &admin.Schedule_Rate{
			Rate: &admin.FixedRate{
				Value: 2,
				Unit:  admin.FixedRateUnit_HOUR,
			},
		},
	})
	assert.NoError(t, err)
	assert.Equal(t, "rate(2 hours)", expression)

	// An empty schedule has no recognized expression type and must be rejected.
	_, err = getScheduleExpression(admin.Schedule{})
	assert.Equal(t, codes.InvalidArgument, err.(flyteAdminErrors.FlyteAdminError).Code())
}

func TestFormatEventScheduleInputs(t *testing.T) {
	inputTransformer := formatEventScheduleInputs(&testSerializedPayload)
	assert.EqualValues(t, map[string]*string{
		"time": &timeValue,
	}, inputTransformer.InputPathsMap)
	assert.Equal(t, testSerializedPayload, *inputTransformer.InputTemplate)
}

// getCloudWatchSchedulerForTest builds a scheduler wired to the given (mock)
// CloudWatch client, with fixed role/queue ARNs asserted on by the tests below.
func getCloudWatchSchedulerForTest(client interfaces.CloudWatchEventClient) scheduleInterfaces.EventScheduler {
	return &cloudWatchScheduler{
		scheduleRoleArn:       "ScheduleRole",
		targetSqsArn:          "TargetSqsArn",
		cloudWatchEventClient: client,
		metrics:               testCloudWatchSchedulerMetrics,
	}
}

func TestAddSchedule(t *testing.T) {
	// Hold the concrete mock type once rather than re-asserting it on every setter call.
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetPutRuleFunc(func(
		input *cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) {
		assert.Equal(t, "rate(1 minute)", *input.ScheduleExpression)
		assert.Equal(t, testScheduleName, *input.Name)
		assert.Equal(t, testScheduleDescription, *input.Description)
		assert.Equal(t, "ScheduleRole", *input.RoleArn)
		assert.Equal(t, enableState, *input.State)
		return &cloudwatchevents.PutRuleOutput{}, nil
	})

	mockCloudWatchEventClient.SetPutTargetsFunc(func(
		input *cloudwatchevents.PutTargetsInput) (*cloudwatchevents.PutTargetsOutput, error) {
		assert.Equal(t, testScheduleName, *input.Rule)
		assert.Len(t, input.Targets, 1)
		assert.Equal(t, "TargetSqsArn", *input.Targets[0].Arn)
		assert.Equal(t, testScheduleName, *input.Targets[0].Id)
		assert.NotEmpty(t, *input.Targets[0].InputTransformer)
		return &cloudwatchevents.PutTargetsOutput{}, nil
	})

	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	assert.NoError(t, scheduler.AddSchedule(context.Background(),
		scheduleInterfaces.AddScheduleInput{
			Identifier: testSchedulerIdentifier,
			ScheduleExpression: admin.Schedule{
				ScheduleExpression: &admin.Schedule_Rate{
					Rate: &admin.FixedRate{
						Value: 1,
						Unit:  admin.FixedRateUnit_MINUTE,
					},
				},
			},
			Payload: &testSerializedPayload,
		}))
}

func TestAddSchedule_InvalidScheduleExpression(t *testing.T) {
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient()
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	// No ScheduleExpression is set, so AddSchedule must fail validation.
	err := scheduler.AddSchedule(context.Background(),
		scheduleInterfaces.AddScheduleInput{
			Identifier: testSchedulerIdentifier,
			Payload:    &testSerializedPayload,
		})
	assert.Equal(t, codes.InvalidArgument, err.(flyteAdminErrors.FlyteAdminError).Code())
}

func TestAddSchedule_PutRuleError(t *testing.T) {
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetPutRuleFunc(func(
		input *cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) {
		return nil, expectedError
	})

	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	err := scheduler.AddSchedule(context.Background(),
		scheduleInterfaces.AddScheduleInput{
			Identifier: testSchedulerIdentifier,
			ScheduleExpression: admin.Schedule{
				ScheduleExpression: &admin.Schedule_Rate{
					Rate: &admin.FixedRate{
						Value: 1,
						Unit:  admin.FixedRateUnit_MINUTE,
					},
				},
			},
			Payload: &testSerializedPayload,
		})
	assert.Equal(t, codes.Internal, err.(flyteAdminErrors.FlyteAdminError).Code())
}

func TestAddSchedule_PutTargetsError(t *testing.T) {
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetPutRuleFunc(func(
		input *cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) {
		return &cloudwatchevents.PutRuleOutput{}, nil
	})
	mockCloudWatchEventClient.SetPutTargetsFunc(func(
		input *cloudwatchevents.PutTargetsInput) (*cloudwatchevents.PutTargetsOutput, error) {
		return nil, expectedError
	})
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	err := scheduler.AddSchedule(context.Background(),
		scheduleInterfaces.AddScheduleInput{
			Identifier: testSchedulerIdentifier,
			ScheduleExpression: admin.Schedule{
				ScheduleExpression: &admin.Schedule_Rate{
					Rate: &admin.FixedRate{
						Value: 1,
						Unit:  admin.FixedRateUnit_MINUTE,
					},
				},
			},
			Payload: &testSerializedPayload,
		})
	assert.Equal(t, codes.Internal, err.(flyteAdminErrors.FlyteAdminError).Code())
}

func TestRemoveSchedule(t *testing.T) {
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetRemoveTargetsFunc(func(
		input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) {
		assert.Len(t, input.Ids, 1)
		assert.Equal(t, testScheduleName, *input.Ids[0])
		assert.Equal(t, testScheduleName, *input.Rule)
		return &cloudwatchevents.RemoveTargetsOutput{}, nil
	})
	mockCloudWatchEventClient.SetDeleteRuleFunc(func(
		input *cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) {
		assert.Equal(t, testScheduleName, *input.Name)
		return &cloudwatchevents.DeleteRuleOutput{}, nil
	})
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	assert.NoError(t, scheduler.RemoveSchedule(context.Background(), testSchedulerIdentifier))
}

func TestRemoveSchedule_RemoveTargetsError(t *testing.T) {
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetRemoveTargetsFunc(func(
		input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) {
		return nil, expectedError
	})
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	err := scheduler.RemoveSchedule(context.Background(), testSchedulerIdentifier)
	assert.Equal(t, codes.Internal, err.(flyteAdminErrors.FlyteAdminError).Code())
}

func TestRemoveSchedule_InvalidTarget(t *testing.T) {
	// A ResourceNotFound error from RemoveTargets is tolerated, not surfaced.
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetRemoveTargetsFunc(func(
		input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) {
		return nil, awserr.New(cloudwatchevents.ErrCodeResourceNotFoundException, "foo", expectedError)
	})
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	err := scheduler.RemoveSchedule(context.Background(), testSchedulerIdentifier)
	assert.NoError(t, err)
}

func TestRemoveSchedule_DeleteRuleError(t *testing.T) {
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetRemoveTargetsFunc(func(
		input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) {
		return &cloudwatchevents.RemoveTargetsOutput{}, nil
	})
	mockCloudWatchEventClient.SetDeleteRuleFunc(func(
		input *cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) {
		return nil, expectedError
	})
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	err := scheduler.RemoveSchedule(context.Background(), testSchedulerIdentifier)
	assert.Equal(t, codes.Internal, err.(flyteAdminErrors.FlyteAdminError).Code())
}

func TestRemoveSchedule_InvalidRule(t *testing.T) {
	// A ResourceNotFound error from DeleteRule is tolerated, not surfaced.
	mockCloudWatchEventClient := mocks.NewMockCloudWatchEventClient().(*mocks.MockCloudWatchEventClient)
	mockCloudWatchEventClient.SetRemoveTargetsFunc(func(
		input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) {
		return &cloudwatchevents.RemoveTargetsOutput{}, nil
	})
	mockCloudWatchEventClient.SetDeleteRuleFunc(func(
		input *cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) {
		return nil, awserr.New(cloudwatchevents.ErrCodeResourceNotFoundException, "foo", expectedError)
	})
	scheduler := getCloudWatchSchedulerForTest(mockCloudWatchEventClient)
	err := scheduler.RemoveSchedule(context.Background(), testSchedulerIdentifier)
	assert.NoError(t, err)
}
diff --git a/pkg/async/schedule/aws/interfaces/cloud_watch_event_client.go b/pkg/async/schedule/aws/interfaces/cloud_watch_event_client.go
new file mode 100644
index 000000000..2ba47974e
--- /dev/null
+++ 
b/pkg/async/schedule/aws/interfaces/cloud_watch_event_client.go @@ -0,0 +1,11 @@ +package interfaces + +import "github.com/aws/aws-sdk-go/service/cloudwatchevents" + +// A subset of the AWS CloudWatchEvents service client. +type CloudWatchEventClient interface { + PutRule(input *cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) + PutTargets(input *cloudwatchevents.PutTargetsInput) (*cloudwatchevents.PutTargetsOutput, error) + DeleteRule(input *cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) + RemoveTargets(input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) +} diff --git a/pkg/async/schedule/aws/mocks/mock_cloud_watch_event_client.go b/pkg/async/schedule/aws/mocks/mock_cloud_watch_event_client.go new file mode 100644 index 000000000..0a85b60bb --- /dev/null +++ b/pkg/async/schedule/aws/mocks/mock_cloud_watch_event_client.go @@ -0,0 +1,71 @@ +package mocks + +import ( + "github.com/aws/aws-sdk-go/service/cloudwatchevents" + "github.com/lyft/flyteadmin/pkg/async/schedule/aws/interfaces" +) + +type putRuleFunc func(input *cloudwatchevents.PutRuleInput) (*cloudwatchevents.PutRuleOutput, error) +type putTargetsFunc func(input *cloudwatchevents.PutTargetsInput) (*cloudwatchevents.PutTargetsOutput, error) +type deleteRuleFunc func(input *cloudwatchevents.DeleteRuleInput) (*cloudwatchevents.DeleteRuleOutput, error) +type removeTargetsFunc func(input *cloudwatchevents.RemoveTargetsInput) (*cloudwatchevents.RemoveTargetsOutput, error) + +// A mock implementation of CloudWatchEventClient for use in tests. 
+type MockCloudWatchEventClient struct { + putRule putRuleFunc + putTargets putTargetsFunc + deleteRule deleteRuleFunc + removeTargets removeTargetsFunc +} + +func (c *MockCloudWatchEventClient) SetPutRuleFunc(putRule putRuleFunc) { + c.putRule = putRule +} + +func (c *MockCloudWatchEventClient) PutRule(input *cloudwatchevents.PutRuleInput) ( + *cloudwatchevents.PutRuleOutput, error) { + if c.putRule != nil { + return c.putRule(input) + } + return nil, nil +} + +func (c *MockCloudWatchEventClient) SetPutTargetsFunc(putTargets putTargetsFunc) { + c.putTargets = putTargets +} + +func (c *MockCloudWatchEventClient) PutTargets(input *cloudwatchevents.PutTargetsInput) ( + *cloudwatchevents.PutTargetsOutput, error) { + if c.putTargets != nil { + return c.putTargets(input) + } + return nil, nil +} + +func (c *MockCloudWatchEventClient) SetDeleteRuleFunc(deleteRule deleteRuleFunc) { + c.deleteRule = deleteRule +} + +func (c *MockCloudWatchEventClient) DeleteRule(input *cloudwatchevents.DeleteRuleInput) ( + *cloudwatchevents.DeleteRuleOutput, error) { + if c.deleteRule != nil { + return c.deleteRule(input) + } + return nil, nil +} + +func (c *MockCloudWatchEventClient) SetRemoveTargetsFunc(removeTargets removeTargetsFunc) { + c.removeTargets = removeTargets +} + +func (c *MockCloudWatchEventClient) RemoveTargets(input *cloudwatchevents.RemoveTargetsInput) ( + *cloudwatchevents.RemoveTargetsOutput, error) { + if c.removeTargets != nil { + return c.removeTargets(input) + } + return nil, nil +} + +func NewMockCloudWatchEventClient() interfaces.CloudWatchEventClient { + return &MockCloudWatchEventClient{} +} diff --git a/pkg/async/schedule/aws/serialization.go b/pkg/async/schedule/aws/serialization.go new file mode 100644 index 000000000..e6b455a9d --- /dev/null +++ b/pkg/async/schedule/aws/serialization.go @@ -0,0 +1,86 @@ +// Functions for serializing and deserializing scheduled events in AWS. 
+package aws + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + "github.com/lyft/flytestdlib/logger" + + "github.com/golang/protobuf/proto" + "github.com/lyft/flyteadmin/pkg/errors" + "github.com/lyft/flyteidl/gen/pb-go/flyteidl/admin" + "google.golang.org/grpc/codes" +) + +const awsTimestampPlaceholder = "