From ed9b9e9c8fa3882db0c4a43101d9c446250d0733 Mon Sep 17 00:00:00 2001 From: Kirill Salnikov Date: Thu, 21 Nov 2024 17:29:39 +0300 Subject: [PATCH] =?UTF-8?q?GEFEST-856=20=D0=94=D0=B5=D0=BF=D0=BB=D0=BE?= =?UTF-8?q?=D0=B9=20counter=20=D0=B8=20tls=20=D0=B4=D0=BB=D1=8F=20=D0=BA?= =?UTF-8?q?=D0=BB=D0=B8=D0=B5=D0=BD=D1=82=D0=BE=D0=B2=20kafka?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- charts/keys/Chart.yaml | 2 +- charts/keys/README.md | 67 ++++++-- charts/keys/templates/api/deployment.yaml | 38 ++++- .../keys/templates/counter/statefulset.yaml | 85 ++++++++++ charts/keys/templates/helpers.tpl | 153 ++++++++++++++++-- charts/keys/templates/secret-kafka-audit.yaml | 20 +++ charts/keys/templates/secret-kafka-main.yaml | 20 +++ charts/keys/values.yaml | 130 ++++++++++++++- 8 files changed, 486 insertions(+), 29 deletions(-) create mode 100644 charts/keys/templates/counter/statefulset.yaml create mode 100644 charts/keys/templates/secret-kafka-audit.yaml create mode 100644 charts/keys/templates/secret-kafka-main.yaml diff --git a/charts/keys/Chart.yaml b/charts/keys/Chart.yaml index a8acd5144..02d73ff49 100644 --- a/charts/keys/Chart.yaml +++ b/charts/keys/Chart.yaml @@ -4,7 +4,7 @@ type: application description: A Helm chart for Kubernetes to deploy API Keys service version: 1.31.0 -appVersion: 1.89.0 +appVersion: 1.102.3 maintainers: - name: 2gis diff --git a/charts/keys/README.md b/charts/keys/README.md index 93192e295..c4476f98b 100644 --- a/charts/keys/README.md +++ b/charts/keys/README.md @@ -31,7 +31,7 @@ See the [documentation](https://docs.2gis.com/en/on-premise/keys) to learn about | `imagePullSecrets` | Kubernetes image pull secrets. | `[]` | | `imagePullPolicy` | Pull policy. | `IfNotPresent` | | `backend.image.repository` | Backend service image repository. | `2gis-on-premise/keys-backend` | -| `backend.image.tag` | Backend service image tag. | `1.89.0` | +| `backend.image.tag` | Backend service image tag. | `1.102.3` | | `admin.image.repository` | Admin service image repository. | `2gis-on-premise/keys-ui` | | `admin.image.tag` | Admin service image tag. | `0.8.0` | | `redis.image.repository` | Redis image repository. | `2gis-on-premise/keys-redis` | @@ -184,6 +184,34 @@ See the [documentation](https://docs.2gis.com/en/on-premise/keys) to learn about | `dispatcher.cleaner.cron.successfulJobsHistoryLimit` | Specifies the number of successful finished jobs to keep. See [jobs history limits](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#jobs-history-limits). | `3` | | `dispatcher.cleaner.cron.suspend` | You can suspend execution of Jobs for a CronJob, by setting the field to true. See [schedule suspension](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-suspension). | `false` | | `dispatcher.cleaner.nodeSelector` | Kubernetes [node selectors](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector). | `{}` | +| `counter.enabled` | Counter worker is enabled. | `false` | +| `counter.replicas` | A replica count for the pod. | `1` | +| `counter.prometheus.scrape` | If Prometheus scrape is enabled. | `true` | +| `counter.resources.requests.cpu` | A CPU request. | `20m` | +| `counter.resources.requests.memory` | A memory request. | `32Mi` | +| `counter.resources.limits.cpu` | A CPU limit. | `1000m` | +| `counter.resources.limits.memory` | A memory limit. | `512Mi` | +| `counter.logLevel` | Log level for the worker. 
Can be: `trace`, `debug`, `info`, `warning`, `error`, `fatal`. | `warning` |
+| `counter.preloader.refreshTick` | Refresh interval for in-memory cache with keys limitations info. The smaller the interval, the faster the worker will know about changes in limitations. | `1m` |
+| `counter.updateStatusQueryTimeout` | Timeout for database queries to update key status. | `1s` |
+| `counter.buffer` | **Settings for in-memory buffer for statistics data.** | |
+| `counter.buffer.size` | The maximum size of the buffer. When the limit is reached, the data from the buffer is transferred to Redis. | `1000` |
+| `counter.buffer.delay` | The maximum interval between data transfer operations from the buffer to Redis. | `1s` |
+
+### Counter Redis settings
+
+| Name | Description | Value |
+| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `counter.redis.retries` | Number of retries for Redis commands before giving up. | `5` |
+| `counter.redis.minRetryBackoff` | Minimum backoff between retries of Redis commands. | `100ms` |
+| `counter.redis.maxRetryBackoff` | Maximum backoff between retries of Redis commands. | `3s` |
+| `counter.annotations` | Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). | `{}` |
+| `counter.labels` | Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). | `{}` |
+| `counter.podAnnotations` | Kubernetes [pod annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). | `{}` |
+| `counter.podLabels` | Kubernetes [pod labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/). | `{}` |
+| `counter.nodeSelector` | Kubernetes [node selectors](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector). | `{}` |
+| `counter.affinity` | Kubernetes pod [affinity settings](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). | `{}` |
+| `counter.tolerations` | Kubernetes [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) settings. | `{}` |
 
 ### Redis settings
 
@@ -226,15 +254,34 @@ See the [documentation](https://docs.2gis.com/en/on-premise/keys) to learn about
 
 ### Kafka settings
 
-| Name | Description | Value |
-| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------ |
-| `kafka.audit` | **Settings for sending audit messages.** | |
-| `kafka.audit.bootstrapServers` | Comma-separated list of host and port pairs that are the addresses of the Kafka brokers (e.g. 'localhost:9092,localhost:9093'). | `""` |
-| `kafka.audit.username` | Username for authorization (SASL/PLAINTEXT SHA-512). | `""` |
-| `kafka.audit.password` | Password for authorization (SASL/PLAINTEXT SHA-512). | `""` |
-| `kafka.audit.topic` | Topic to produce audit messages. | `""` |
-| `kafka.audit.produce.retryCount` | Number of retries to produce a message. | `5` |
-| `kafka.audit.produce.idempotentWrite` | Flag to enable/disable [idempotent write](https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#enable-idempotence). 
| `true` | +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ | +| `kafka.main` | **Settings for read stat messages.** | | +| `kafka.main.clientPrefix` | Client prefix name. | `production` | +| `kafka.main.clientId` | Client id. If empty, then hostname will be used. | `""` | +| `kafka.main.brokers` | Comma-separated list of host and port pairs that are the addresses of the Kafka brokers (e.g. 'localhost:9092,localhost:9093'). | `""` | +| `kafka.main.securityProtocol` | Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. | `PLAINTEXT` | +| `kafka.main.SASLMechanism` | Authentication mechanism when security_protocol is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. | `PLAIN` | +| `kafka.main.username` | Username for authorization (SASL). | `""` | +| `kafka.main.password` | Password for authorization (SASL). | `""` | +| `kafka.main.tls.skipServerCertificateVerify` | Controls whether a client verifies the server's certificate chain and host name. | `false` | +| `kafka.main.tls.rootCert` | Root certificate. | `""` | +| `kafka.main.tls.cert` | Client certificate. | `""` | +| `kafka.main.tls.key` | Client key. | `""` | +| `kafka.main.topics.stats` | Topic to consume stat messages. | `""` | +| `kafka.audit` | **Settings for sending audit messages.** | | +| `kafka.audit.bootstrapServers` | Comma-separated list of host and port pairs that are the addresses of the Kafka brokers (e.g. 'localhost:9092,localhost:9093'). | `""` | +| `kafka.audit.securityProtocol` | Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. | `PLAINTEXT` | +| `kafka.audit.SASLMechanism` | Authentication mechanism when security_protocol is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. | `PLAIN` | +| `kafka.audit.username` | Username for authorization (SASL). | `""` | +| `kafka.audit.password` | Password for authorization (SASL). | `""` | +| `kafka.audit.topic` | Topic to produce audit messages. | `""` | +| `kafka.audit.tls.skipServerCertificateVerify` | Controls whether a client verifies the server's certificate chain and host name. | `false` | +| `kafka.audit.tls.rootCert` | Root certificate. | `""` | +| `kafka.audit.tls.cert` | Client certificate. | `""` | +| `kafka.audit.tls.key` | Client key. | `""` | +| `kafka.audit.produce.retryCount` | Number of retries to produce a message. | `5` | +| `kafka.audit.produce.idempotentWrite` | Flag to enable/disable [idempotent write](https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#enable-idempotence). | `true` | ### LDAP connection settings diff --git a/charts/keys/templates/api/deployment.yaml b/charts/keys/templates/api/deployment.yaml index e9d1d4f56..b406142a6 100644 --- a/charts/keys/templates/api/deployment.yaml +++ b/charts/keys/templates/api/deployment.yaml @@ -38,6 +38,25 @@ spec: {{- toYaml . 
| nindent 8 }}
       {{- end }}
     spec:
+      {{- if or (eq .Values.kafka.audit.securityProtocol "SSL") (eq .Values.kafka.audit.securityProtocol "SASL_SSL") }}
+      initContainers:
+        - name: copy-kafka-audit-certs
+          image: '{{ required "A valid .Values.dgctlDockerRegistry entry required" .Values.dgctlDockerRegistry }}/{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default .Chart.AppVersion }}'
+          imagePullPolicy: {{ .Values.imagePullPolicy }}
+          resources:
+            {{- toYaml .Values.api.resources | nindent 12 }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp /tmp/certs/* /etc/2gis/secret/kafka-audit/
+              chmod 600 /etc/2gis/secret/kafka-audit/client.key
+          volumeMounts:
+            - name: {{ include "keys.kafka-audit-raw.name" . | quote }}
+              mountPath: /tmp/certs
+            - name: {{ include "keys.kafka-audit.name" . | quote }}
+              mountPath: /etc/2gis/secret/kafka-audit
+      {{- end }}
       containers:
         - name: keys-api
           image: {{ required "A valid .Values.dgctlDockerRegistry entry required" .Values.dgctlDockerRegistry }}/{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}
@@ -62,13 +81,28 @@ spec:
             {{- if .Values.customCAs.bundle }}
             {{- include "keys.env.custom.ca.path" . | nindent 12 }}
             {{- end }}
-          {{- if .Values.customCAs.bundle }}
+          {{- if or (.Values.customCAs.bundle) (eq .Values.kafka.audit.securityProtocol "SSL") (eq .Values.kafka.audit.securityProtocol "SASL_SSL") }}
           volumeMounts:
+            {{- if .Values.customCAs.bundle }}
             {{- include "keys.custom.ca.volumeMounts" . | nindent 12 }}
+            {{- end }}
+            {{- if or (eq .Values.kafka.audit.securityProtocol "SSL") (eq .Values.kafka.audit.securityProtocol "SASL_SSL") }}
+            - name: {{ printf "%s-kafka-audit" (include "keys.name" .) | quote }}
+              mountPath: /etc/2gis/secret/kafka-audit
+            {{- end }}
           {{- end }}
-      {{- if .Values.customCAs.bundle }}
+      {{- if or (.Values.customCAs.bundle) (eq .Values.kafka.audit.securityProtocol "SSL") (eq .Values.kafka.audit.securityProtocol "SASL_SSL") }}
       volumes:
+        {{- if .Values.customCAs.bundle }}
         {{- include "keys.custom.ca.deploys.volumes" . | nindent 8 }}
+        {{- end }}
+        {{- if or (eq .Values.kafka.audit.securityProtocol "SSL") (eq .Values.kafka.audit.securityProtocol "SASL_SSL") }}
+        - name: {{ include "keys.kafka-audit-raw.name" . | quote }}
+          secret:
+            secretName: {{ include "keys.kafka-audit.name" . | quote }}
+        - name: {{ include "keys.kafka-audit.name" . | quote }}
+          emptyDir: {}
+        {{- end }}
       {{- end }}
       {{- with .Values.api.nodeSelector }}
       nodeSelector:
diff --git a/charts/keys/templates/counter/statefulset.yaml b/charts/keys/templates/counter/statefulset.yaml
new file mode 100644
index 000000000..a76cf9384
--- /dev/null
+++ b/charts/keys/templates/counter/statefulset.yaml
@@ -0,0 +1,85 @@
+{{- if .Values.counter.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ include "keys.counter.name" . }}
+  labels:
+    {{- include "keys.counter.labels" . | nindent 4 }}
+  {{- with .Values.counter.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  serviceName: {{ include "keys.counter.name" . }}
+  replicas: {{ .Values.counter.replicas }}
+  selector:
+    matchLabels:
+      {{- include "keys.counter.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels:
+        {{- include "keys.counter.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- if or (eq .Values.kafka.main.securityProtocol "SSL") (eq .Values.kafka.main.securityProtocol "SASL_SSL") }}
+      initContainers:
+        - name: copy-kafka-main-certs
+          image: '{{ required "A valid .Values.dgctlDockerRegistry entry required" .Values.dgctlDockerRegistry }}/{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag | default .Chart.AppVersion }}'
+          imagePullPolicy: {{ .Values.imagePullPolicy }}
+          resources:
+            {{- toYaml .Values.counter.resources | nindent 12 }}
+          command:
+            - /bin/sh
+            - -ec
+            - |
+              cp /tmp/certs/* /etc/2gis/secret/kafka-main/
+              chmod 600 /etc/2gis/secret/kafka-main/client.key
+          volumeMounts:
+            - name: {{ include "keys.kafka-main-raw.name" . | quote }}
+              mountPath: /tmp/certs
+            - name: {{ include "keys.kafka-main.name" . | quote }}
+              mountPath: /etc/2gis/secret/kafka-main
+      {{- end }}
+      containers:
+        - name: counter
+          image: {{ required "A valid .Values.dgctlDockerRegistry entry required" .Values.dgctlDockerRegistry }}/{{ .Values.backend.image.repository }}:{{ .Values.backend.image.tag }}
+          imagePullPolicy: {{ .Values.imagePullPolicy }}
+          command: [ "keys-counter" ]
+          ports:
+            - name: http
+              containerPort: 8090
+          resources:
+            {{- toYaml .Values.counter.resources | nindent 12 }}
+          env:
+            {{- include "keys.env.db.deploys" . | nindent 12 }}
+            {{- include "keys.env.redis" . | nindent 12 }}
+            {{- include "keys.env.counter" . | nindent 12 }}
+          {{- if or (eq .Values.kafka.main.securityProtocol "SSL") (eq .Values.kafka.main.securityProtocol "SASL_SSL") }}
+          volumeMounts:
+            - name: {{ include "keys.kafka-main.name" . | quote }}
+              mountPath: /etc/2gis/secret/kafka-main
+          {{- end }}
+      {{- if or (eq .Values.kafka.main.securityProtocol "SSL") (eq .Values.kafka.main.securityProtocol "SASL_SSL") }}
+      volumes:
+        - name: {{ include "keys.kafka-main-raw.name" . | quote }}
+          secret:
+            secretName: {{ include "keys.kafka-main.name" . | quote }}
+        - name: {{ include "keys.kafka-main.name" . | quote }}
+          emptyDir: {}
+      {{- end }}
+      {{- with .Values.counter.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.counter.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.counter.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- end }}
diff --git a/charts/keys/templates/helpers.tpl b/charts/keys/templates/helpers.tpl
index 6f6f7c3b5..73ade13f1 100644
--- a/charts/keys/templates/helpers.tpl
+++ b/charts/keys/templates/helpers.tpl
@@ -18,6 +18,10 @@
 {{ include "keys.name" . }}-cleaner
 {{- end }}
 
+{{- define "keys.counter.name" -}}
+{{ include "keys.name" . }}-counter
+{{- end }}
+
 {{- define "keys.migrate.name" -}}
 {{ include "keys.name" . }}-migrate
 {{- end }}
@@ -42,6 +46,39 @@
 {{ include "keys.name" . }}-jobs
 {{- end }}
 
+{{- /*
+Name for kafka main intermediate volume for copying secrets
+*/ -}}
+
+{{- define "keys.kafka-main-raw.name" -}}
+{{- printf "%s-kafka-main-raw" (include "keys.name" .) -}}
+{{- end }}
+
+{{- /*
+Name for kafka main secret and volume
+*/ -}}
+
+{{- define "keys.kafka-main.name" -}}
+{{- printf "%s-kafka-main" (include "keys.name" .) -}}
+{{- end }}
+
+{{- /*
+Name for kafka audit intermediate volume for copying secrets
+*/ -}}
+
+{{- define "keys.kafka-audit-raw.name" -}}
+{{- printf "%s-kafka-audit-raw" (include "keys.name" .) 
-}} +{{- end }} + +{{- /* +Name for kafka audit secret and volume +*/ -}} + +{{- define "keys.kafka-audit.name" -}} +{{- printf "%s-kafka-audit" (include "keys.name" .) -}} +{{- end }} + + {{- define "keys.selectorLabels" -}} app.kubernetes.io/name: {{ .Chart.Name }} app.kubernetes.io/instance: {{ .Release.Name }} @@ -104,6 +141,16 @@ app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} +{{- define "keys.counter.selectorLabels" -}} +app.kubernetes.io/name: {{ .Chart.Name }}-counter +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{- define "keys.counter.labels" -}} +{{ include "keys.counter.selectorLabels" . }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} + {{- define "keys.import.labels" -}} app.kubernetes.io/name: {{ .Chart.Name }}-import app.kubernetes.io/instance: {{ .Release.Name }} @@ -199,10 +246,10 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} value: "{{ .Values.postgres.rw.schema }}" - name: KEYS_DB_RW_USERNAME value: "{{ required "A valid .Values.postgres.rw.username required" .Values.postgres.rw.username }}" -{{- end }} +{{- end -}} {{- define "keys.env.db.deploys" -}} -{{ include "keys.env.db" . }} +{{- include "keys.env.db" . }} - name: KEYS_DB_RO_PASSWORD valueFrom: secretKeyRef: @@ -213,10 +260,10 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} secretKeyRef: name: {{ include "keys.secret.deploys.name" . }} key: dbRWPassword -{{- end }} +{{- end -}} {{- define "keys.env.db.jobs" -}} -{{ include "keys.env.db" . }} +{{- include "keys.env.db" . }} - name: KEYS_DB_RO_PASSWORD valueFrom: secretKeyRef: @@ -227,31 +274,31 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} secretKeyRef: name: {{ include "keys.secret.jobs.name" . }} key: dbRWPassword -{{- end }} +{{- end -}} {{- define "keys.env.redis" -}} -{{- if .Values.redis.useExternalRedis }} +{{- if .Values.redis.useExternalRedis -}} - name: KEYS_REDIS_HOST value: "{{ .Values.redis.host }}" - name: KEYS_REDIS_DB value: "{{ .Values.redis.db }}" -{{- else }} +{{- else -}} - name: KEYS_REDIS_HOST value: "{{ include "keys.redis.name" . }}" -{{- end }} +{{- end }} - name: KEYS_REDIS_PORT value: "{{ .Values.redis.port }}" -{{- if .Values.redis.password }} +{{- if .Values.redis.password -}} - name: KEYS_REDIS_PASSWORD valueFrom: secretKeyRef: name: {{ include "keys.secret.deploys.name" . 
}} key: redisPassword -{{- end }} -{{- end }} +{{- end -}} +{{- end -}} {{- define "keys.env.auth" -}} -{{- if .Values.api.adminUsers }} +{{- if .Values.api.adminUsers -}} - name: KEYS_ADMIN_USERS valueFrom: secretKeyRef: @@ -308,6 +355,57 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} value: "{{ .Values.admin.badge.backgroundColor }}" {{- end }} +{{- define "keys.env.counter" -}} +- name: KEYS_LOG_LEVEL + value: "{{ .Values.counter.logLevel }}" +- name: KEYS_COUNTER_BUFFER_SIZE + value: "{{ .Values.counter.buffer.size }}" +- name: KEYS_COUNTER_BUFFER_DELAY + value: "{{ .Values.counter.buffer.delay }}" +- name: KEYS_COUNTER_PRELOADER_REFRESH_TICK + value: "{{ .Values.counter.preloader.refreshTick }}" +- name: KEYS_COUNTER_UPDATE_STATUS_QUERY_TIMEOUT + value: "{{ .Values.counter.updateStatusQueryTimeout }}" +- name: KEYS_KAFKA_MAIN_BROKERS + value: "{{ .Values.kafka.main.brokers }}" +- name: KEYS_KAFKA_MAIN_CLIENT_PREFIX + value: "{{ .Values.kafka.main.clientPrefix }}" +- name: KEYS_KAFKA_MAIN_CLIENT_ID + value: "{{ .Values.kafka.main.clientId }}" +- name: KEYS_KAFKA_MAIN_STATS_TOPIC + value: "{{ .Values.kafka.main.topics.stats }}" +- name: KEYS_KAFKA_MAIN_USERNAME + value: "{{ .Values.kafka.main.username }}" +{{- if .Values.kafka.main.password }} +- name: KEYS_KAFKA_MAIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "keys.kafka-main.name" . }} + key: kafkaPassword +{{- end }} +- name: KEYS_REDIS_RETRIES + value: "{{ .Values.counter.redis.retries }}" +- name: KEYS_REDIS_MIN_RETRY_BACKOFF + value: "{{ .Values.counter.redis.minRetryBackoff }}" +- name: KEYS_REDIS_MAX_RETRY_BACKOFF + value: "{{ .Values.counter.redis.maxRetryBackoff }}" +- name: KEYS_KAFKA_MAIN_SECURITY_PROTOCOL + value: "{{ .Values.kafka.main.securityProtocol }}" +- name: KEYS_KAFKA_MAIN_SASL_MECHANISM + value: "{{ .Values.kafka.main.SASLMechanism }}" +{{- $sslEnabled := include "kafka.ssl.enabled" (dict "global" $ "variation" "main") }} +{{- if $sslEnabled }} +- name: KEYS_KAFKA_MAIN_TLS_SKIP_SERVER_CERTIFICATE_VERIFY + value: "{{ .Values.kafka.main.tls.skipServerCertificateVerify }}" +- name: KEYS_KAFKA_MAIN_TLS_CLIENT_CERTIFICATE_PATH + value: "/etc/2gis/secret/kafka-main/client.crt" +- name: KEYS_KAFKA_MAIN_TLS_CLIENT_KEY_PATH + value: "/etc/2gis/secret/kafka-main/client.key" +- name: KEYS_KAFKA_MAIN_TLS_CA_CERT_PATH + value: "/etc/2gis/secret/kafka-main/ca.crt" +{{- end }} +{{- end }} + {{- define "keys.env.predef" -}} {{ range $service, $key := .Values.predefined.service.keys }} - name: KEYS_PREDEF_SERVICE_KEY_{{ $service | upper }} @@ -349,8 +447,28 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} value: "{{ .Values.kafka.audit.bootstrapServers }}" - name: KEYS_KAFKA_AUDIT_USERNAME value: "{{ .Values.kafka.audit.username }}" +{{- if .Values.kafka.audit.password }} - name: KEYS_KAFKA_AUDIT_PASSWORD - value: "{{ .Values.kafka.audit.password }}" + valueFrom: + secretKeyRef: + name: {{ include "keys.kafka-audit.name" . 
}}
+      key: kafkaPassword
+{{- end }}
+- name: KEYS_KAFKA_AUDIT_SECURITY_PROTOCOL
+  value: "{{ .Values.kafka.audit.securityProtocol }}"
+- name: KEYS_KAFKA_AUDIT_SASL_MECHANISM
+  value: "{{ .Values.kafka.audit.SASLMechanism }}"
+{{- $sslEnabled := include "kafka.ssl.enabled" (dict "global" $ "variation" "audit") }}
+{{- if $sslEnabled }}
+- name: KEYS_KAFKA_AUDIT_TLS_SKIP_SERVER_CERTIFICATE_VERIFY
+  value: "{{ .Values.kafka.audit.tls.skipServerCertificateVerify }}"
+- name: KEYS_KAFKA_AUDIT_TLS_CLIENT_CERTIFICATE_PATH
+  value: "/etc/2gis/secret/kafka-audit/client.crt"
+- name: KEYS_KAFKA_AUDIT_TLS_CLIENT_KEY_PATH
+  value: "/etc/2gis/secret/kafka-audit/client.key"
+- name: KEYS_KAFKA_AUDIT_TLS_CA_CERT_PATH
+  value: "/etc/2gis/secret/kafka-audit/ca.crt"
+{{- end }}
 - name: KEYS_KAFKA_AUDIT_TOPIC
   value: "{{ .Values.kafka.audit.topic }}"
 - name: KEYS_KAFKA_AUDIT_PRODUCE_RETRY_COUNT
@@ -424,3 +542,12 @@ Return the appropriate apiVersion for Horizontal Pod Autoscaler.
 {{- define "keys.configmap.deploys.name" -}}
 {{ include "keys.name" . }}-configmap-deploys
 {{- end -}}
+
+{{- define "kafka.ssl.enabled" }}
+{{- $global := required "Global cursor is required in dict!" (get . "global") -}}
+{{- $variation := required "Kafka variant is required in dict!" (get . "variation") -}}
+{{- $securityProtocol := index $global.Values.kafka $variation "securityProtocol" -}}
+{{- $isEnabled := or (eq $securityProtocol "SSL") (eq $securityProtocol "SASL_SSL") -}}
+{{/* Converting bool to a "truthy" string because "include" can only return a string. */}}
+{{- ternary "true" "" $isEnabled }}
+{{- end -}}
diff --git a/charts/keys/templates/secret-kafka-audit.yaml b/charts/keys/templates/secret-kafka-audit.yaml
new file mode 100644
index 000000000..51e0dd254
--- /dev/null
+++ b/charts/keys/templates/secret-kafka-audit.yaml
@@ -0,0 +1,20 @@
+{{- $sslEnabled := include "kafka.ssl.enabled" (dict "global" $ "variation" "audit") }}
+{{- $password := .Values.kafka.audit.password }}
+{{- if or ($sslEnabled) (not (empty $password)) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "keys.kafka-audit.name" . | quote }}
+  labels:
+    {{- include "keys.labels" . | nindent 4 }}
+type: Opaque
+data:
+  {{- if $sslEnabled }}
+  client.crt: {{ required "A valid kafka.audit.tls.cert entry required" .Values.kafka.audit.tls.cert | b64enc | quote }}
+  client.key: {{ required "A valid kafka.audit.tls.key entry required" .Values.kafka.audit.tls.key | b64enc | quote }}
+  ca.crt: {{ required "A valid kafka.audit.tls.rootCert entry required" .Values.kafka.audit.tls.rootCert | b64enc | quote }}
+  {{- end }}
+  {{- with $password }}
+  kafkaPassword: {{ $password | b64enc }}
+  {{- end }}
+{{- end }}
diff --git a/charts/keys/templates/secret-kafka-main.yaml b/charts/keys/templates/secret-kafka-main.yaml
new file mode 100644
index 000000000..3a1c2cfeb
--- /dev/null
+++ b/charts/keys/templates/secret-kafka-main.yaml
@@ -0,0 +1,20 @@
+{{- $sslEnabled := include "kafka.ssl.enabled" (dict "global" $ "variation" "main") }}
+{{- $password := .Values.kafka.main.password }}
+{{- if or ($sslEnabled) (not (empty $password)) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "keys.kafka-main.name" . | quote }}
+  labels:
+    {{- include "keys.labels" . | nindent 4 }}
+type: Opaque
+data:
+  {{- if $sslEnabled }}
+  client.crt: {{ required "A valid kafka.main.tls.cert entry required" .Values.kafka.main.tls.cert | b64enc | quote }}
+  client.key: {{ required "A valid kafka.main.tls.key entry required" .Values.kafka.main.tls.key | b64enc | quote }}
+  ca.crt: {{ required "A valid kafka.main.tls.rootCert entry required" .Values.kafka.main.tls.rootCert | b64enc | quote }}
+  {{- end }}
+  {{- with $password }}
+  kafkaPassword: {{ $password | b64enc }}
+  {{- end }}
+{{- end }}
diff --git a/charts/keys/values.yaml b/charts/keys/values.yaml
index 9f25e2ddc..1c91e6b1a 100644
--- a/charts/keys/values.yaml
+++ b/charts/keys/values.yaml
@@ -31,7 +31,7 @@ featureFlags:
 backend:
   image:
     repository: 2gis-on-premise/keys-backend
-    tag: 1.89.0
+    tag: 1.102.3
 
 
 # @section Admin service settings
@@ -437,6 +437,86 @@ dispatcher:
 
   nodeSelector: {}
 
+counter:
+
+  # @param counter.enabled Counter worker is enabled.
+
+  enabled: false
+
+  # @param counter.replicas A replica count for the pod.
+
+  replicas: 1
+
+  # @param counter.prometheus.scrape If Prometheus scrape is enabled.
+
+  prometheus:
+    scrape: true
+
+  # @param counter.resources.requests.cpu A CPU request.
+  # @param counter.resources.requests.memory A memory request.
+  # @param counter.resources.limits.cpu A CPU limit.
+  # @param counter.resources.limits.memory A memory limit.
+
+  resources:
+    requests:
+      cpu: 20m
+      memory: 32Mi
+    limits:
+      cpu: 1000m
+      memory: 512Mi
+
+  # @param counter.logLevel Log level for the worker. Can be: `trace`, `debug`, `info`, `warning`, `error`, `fatal`.
+  logLevel: warning
+
+  # @param counter.preloader.refreshTick Refresh interval for in-memory cache with keys limitations info. The smaller the interval, the faster the worker will know about changes in limitations.
+
+  preloader:
+    refreshTick: 1m
+
+  # @param counter.updateStatusQueryTimeout Timeout for database queries to update key status.
+
+  updateStatusQueryTimeout: 1s
+
+  # @extra counter.buffer **Settings for in-memory buffer for statistics data.**
+
+  # @param counter.buffer.size The maximum size of the buffer. When the limit is reached, the data from the buffer is transferred to Redis.
+  # @param counter.buffer.delay The maximum interval between data transfer operations from the buffer to Redis.
+
+  buffer:
+    size: 1000
+    delay: 1s
+
+  # @section Counter Redis settings
+
+  # @param counter.redis.retries Number of retries for Redis commands before giving up.
+  # @param counter.redis.minRetryBackoff Minimum backoff between retries of Redis commands.
+  # @param counter.redis.maxRetryBackoff Maximum backoff between retries of Redis commands.
+
+  redis:
+    retries: 5
+    minRetryBackoff: 100ms
+    maxRetryBackoff: 3s
+
+  # @param counter.annotations Kubernetes [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/).
+  # @param counter.labels Kubernetes [labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
+
+  annotations: {}
+  labels: {}
+
+  # @param counter.podAnnotations Kubernetes [pod annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/).
+  # @param counter.podLabels Kubernetes [pod labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/).
+
+  podAnnotations: {}
+  podLabels: {}
+
+  # @param counter.nodeSelector Kubernetes [node selectors](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector).
+  # @param counter.affinity Kubernetes pod [affinity settings](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity). 
+ # @param counter.tolerations Kubernetes [tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) settings. + + nodeSelector: {} + affinity: {} + tolerations: {} + # @section Redis settings redis: @@ -538,20 +618,64 @@ postgres: kafka: + # @extra kafka.main **Settings for read stat messages.** + + # @param kafka.main.clientPrefix Client prefix name. + # @param kafka.main.clientId Client id. If empty, then hostname will be used. + # @param kafka.main.brokers Comma-separated list of host and port pairs that are the addresses of the Kafka brokers (e.g. 'localhost:9092,localhost:9093'). + # @param kafka.main.securityProtocol Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. + # @param kafka.main.SASLMechanism Authentication mechanism when security_protocol is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. + # @param kafka.main.username Username for authorization (SASL). + # @param kafka.main.password Password for authorization (SASL). + # @param kafka.main.tls.skipServerCertificateVerify Controls whether a client verifies the server's certificate chain and host name. + # @param kafka.main.tls.rootCert Root certificate. + # @param kafka.main.tls.cert Client certificate. + # @param kafka.main.tls.key Client key. + # @param kafka.main.topics.stats Topic to consume stat messages. + + main: + clientPrefix: 'production' + clientId: '' + brokers: '' + securityProtocol: 'PLAINTEXT' + SASLMechanism: 'PLAIN' + username: '' + password: '' + tls: + skipServerCertificateVerify: false + rootCert: '' + cert: '' + key: '' + topics: + stats: '' + # @extra kafka.audit **Settings for sending audit messages.** # @param kafka.audit.bootstrapServers Comma-separated list of host and port pairs that are the addresses of the Kafka brokers (e.g. 'localhost:9092,localhost:9093'). - # @param kafka.audit.username Username for authorization (SASL/PLAINTEXT SHA-512). - # @param kafka.audit.password Password for authorization (SASL/PLAINTEXT SHA-512). + # @param kafka.audit.securityProtocol Protocol used to communicate with brokers. Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. + # @param kafka.audit.SASLMechanism Authentication mechanism when security_protocol is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. + # @param kafka.audit.username Username for authorization (SASL). + # @param kafka.audit.password Password for authorization (SASL). # @param kafka.audit.topic Topic to produce audit messages. + # @param kafka.audit.tls.skipServerCertificateVerify Controls whether a client verifies the server's certificate chain and host name. + # @param kafka.audit.tls.rootCert Root certificate. + # @param kafka.audit.tls.cert Client certificate. + # @param kafka.audit.tls.key Client key. # @param kafka.audit.produce.retryCount Number of retries to produce a message. # @param kafka.audit.produce.idempotentWrite Flag to enable/disable [idempotent write](https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#enable-idempotence). audit: bootstrapServers: '' + securityProtocol: 'PLAINTEXT' + SASLMechanism: 'PLAIN' username: '' password: '' topic: '' + tls: + skipServerCertificateVerify: false + rootCert: '' + cert: '' + key: '' produce: retryCount: 5 idempotentWrite: true
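
A minimal values override exercising the new settings might look like the sketch below. It only combines keys introduced in this change; the broker addresses, account names, topic names, and certificate contents are placeholders, not chart defaults:

counter:
  enabled: true

kafka:
  main:
    brokers: 'kafka-1.example.local:9093,kafka-2.example.local:9093'  # placeholder broker list
    securityProtocol: 'SASL_SSL'
    SASLMechanism: 'SCRAM-SHA-512'
    username: 'keys-stats'                                            # placeholder account
    password: 'change-me'                                             # stored in the generated kafka-main Secret
    tls:
      rootCert: '<PEM-encoded CA certificate>'
      cert: '<PEM-encoded client certificate>'
      key: '<PEM-encoded client key>'
    topics:
      stats: 'keys-stats'                                             # placeholder topic name
  audit:
    bootstrapServers: 'kafka-1.example.local:9093,kafka-2.example.local:9093'
    securityProtocol: 'SASL_SSL'
    SASLMechanism: 'SCRAM-SHA-512'
    username: 'keys-audit'                                            # placeholder account
    password: 'change-me'
    topic: 'keys-audit'                                               # placeholder topic name
    tls:
      rootCert: '<PEM-encoded CA certificate>'
      cert: '<PEM-encoded client certificate>'
      key: '<PEM-encoded client key>'

With values like these the chart renders the kafka-main and kafka-audit Secrets, copies the client certificates into place via the new init containers, and deploys the counter StatefulSet alongside the API.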